\rules except wikilink
<$button tooltip="View the top level of the Author Index">
{{$:/core/images/up-arrow}} Author Index
<$action-navigate $to="Author Index"/>
</$button>

@@.cpredtext
!!! Press to Save {{$:/core/ui/Buttons/save-wiki}}
@@

!!! Problem Tiddlers

Count of titles containing subscript/superscript markup: <$count filter="[regexp[,,]] [regexp[\^\^]]"/>, count of missing tiddlers: <$count filter="[all[missing]sort[title]]"/> <$link to="$:/causal/ProblemTiddlers"><$button>View</$button></$link>
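The linked tiddler presumably renders the matching titles; a minimal sketch of such a listing, reusing the same filters (the real layout of $:/causal/ProblemTiddlers may differ):

```
<!-- titles containing subscript/superscript markup -->
<$list filter="[regexp[,,]] [regexp[\^\^]] +[sort[title]]">
<$link><$view field="title"/></$link><br/>
</$list>
<!-- tiddlers that are linked to but do not exist -->
<$list filter="[all[missing]sort[title]]">
<$link><$view field="title"/></$link><br/>
</$list>
```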

!!! Sidebar Tabs
| <$fieldmangler tiddler="$:/core/ui/SideBar/More"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/More"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler> |[[$:/core/ui/SideBar/More]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/Tools"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/Tools"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/Tools]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/Recent"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/Recent"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/Recent]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/History"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/History"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/History]] |
| <$fieldmangler tiddler="$:/plugins/wimmoermans/history/HistoryTab"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/plugins/wimmoermans/history/HistoryTab"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/plugins/wimmoermans/history/HistoryTab]] |
| <$fieldmangler tiddler="$:/causal/Causal Productions History View"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/causal/Causal Productions History View"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/causal/Causal Productions History View]] |
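Every row above uses the same pattern: a $fieldmangler scoped to the tab tiddler, plus buttons that send tm-add-tag / tm-remove-tag with $:/tags/SideBar. For new tabs the pattern can be wrapped in a macro; a minimal sketch (the macro name and example tab are illustrative, and as a pragma the \define would need to sit at the top of the tiddler alongside \rules):

```
\define sidebar-tab-controls(tab)
<$fieldmangler tiddler="$tab$">
<$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"/>Add</$button>
<$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"/>Remove</$button>
</$fieldmangler>
\end

<<sidebar-tab-controls "$:/core/ui/SideBar/Open">>
```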

!!! Sidebar (Page Toolbar) Buttons
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki]] |
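The Add/Remove pairs above simply write "show" or "hide" into the text field of the matching $:/config/PageControlButtons/Visibility/... tiddler. The same field can be driven by one checkbox instead of two buttons; a minimal sketch:

```
<$checkbox tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home"
           field="text" checked="show" unchecked="hide" default="show">
 Show the home button
</$checkbox>
```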

!!! Tiddler (View Toolbar) Buttons
| <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions" text="hide"/>Remove</$button> |[[$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions]] |
| <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit" text="hide"/>Remove</$button> |[[$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit]] |

!!! Causal PDF/MEDIA Display Configuration
| <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="hide"/>Hide</$button> | <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="show"/>Show</$button> |[[$:/causal/config/hidePDFandMEDIA]] |
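Hide/Show here only stores "hide" or "show" in $:/causal/config/hidePDFandMEDIA; presumably the paper tiddlers test that value before rendering their PDF/MEDIA links. A hedged sketch of such a consumer (the real markup in this wiki may differ):

```
<$reveal type="nomatch" state="$:/causal/config/hidePDFandMEDIA" text="hide">
<!-- PDF and MEDIA buttons would be rendered here -->
</$reveal>
```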

<hr>

!!! Style Sheets
<<list-links "[tag[$:/tags/Stylesheet]]">>
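Any tiddler tagged $:/tags/Stylesheet is applied as CSS across the wiki; the .cpredtext class used at the top of this tiddler is presumably defined in one of them. A minimal sketch of such a stylesheet tiddler (the title is hypothetical):

```
title: $:/causal/styles/red-text
tags: $:/tags/Stylesheet
type: text/css

.cpredtext { color: red; }
```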

!!! History
{{{[history[]]}}}

<$button>Clear History<$action-setfield $tiddler="$:/HistoryList" text=""/></$button>
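The history[] operator used for the listing above can also report how many entries the button will discard:

```
History entries: <$count filter="[history[]]"/>
```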

<$button tooltip="View the session which holds this paper">
{{$:/core/images/up-arrow}} This Session
<$action-navigate $to={{!!current_session}}/>
</$button>
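The button above depends on a current_session field on the containing paper tiddler; a minimal sketch that renders it only when that field exists (the guard is an assumption about how the field is used):

```
<$list filter="[all[current]has[current_session]]" variable="ignore">
<$button tooltip="View the session which holds this paper">
{{$:/core/images/up-arrow}} This Session
<$action-navigate $to={{!!current_session}}/>
</$button>
</$list>
```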
<!-- "welcome" splash image: an SVG (843.25 x 595.276 viewBox, exported from Adobe Illustrator 13.0.2) clipping an embedded 1080 x 735 base64-encoded JPEG; the binary image data is omitted here. -->
rFZ6FqtVoVoVoVo+hsVisVn1Dy1iCRGrHaXmIcOcSzd+pw1S7QYglffiR6MVqnfLX+6CwekF5no9
T2Kz6XWu6H6uGtY72fxH6lYrFYrPQyrL6Fqt2bVarVardi1W+ll9KxWbGAD+Y8BWOG/HLhVis+hs
VisVisVisViiSKAgo4uEDdYDlUNFqhz4y14YkxEywwIuWc5gsEN+ILzPaCsVisVisVisVisVisVi
sVisVisVisVisVn09qtVqtVqtVuxarfT1ruh+rhLWO9n8R9cYHSPAVjdOXD9Tt2MHV7gbBnfvPbt
WKWOY3DJqAvYsGv4gvNNsetLFrXdD9XDWsd7P4j64wOlxFY3Tlw/V8HpBeZ6PWus90P1MNax3s/i
PrjA6XEVjdOXD9XwekFr+HMm9i0jT1rrHdj9TDWsd7P4j64wOlxFY3Tlw/V3BYiwhPIkk2k+tdY7
sfqQWsd7P4j64ji4fPhUOpYkudMud37G45/2x8cFrHez+I/9BaLH7sfHBax3s/iP/QbH6A+OC1jv
Z/Ef+g2P0B+pBax3s/iP/QGxMmOzj9AfHBax3s/iP/QOx9nH7sfHBax3s/iP2Vt9V2+lj92PjgsT
F/uSO0lKbXBS8X/MvFS6g+ZeKl1B8y8VLqD5l4qXUHzLxR6g+ZeJl1B8y8SeoPmXiT1B8y8SeoPm
XiT1B8y8SeoPmXiT1B8y8SeoPmXiZdQfMvEnqD5l4k9QfMvEnqD5l4mXUHzLxMuoPmXiZdQfMvEy
6g+ZeJPUHzLxJ6g+ZeJPUHzLxJ6g+ZeJPUHzLxJ6g+ZeJPUHzLxJ6g+ZeJPUHzLxB6g+ZeJPUHzL
xB6g+ZeIPUHzLxB6g+ZeIPUHzLxB6g968QeqPmXiD1B8y8QeoPevEHqD5l4g9QfMvEHqj5l4g9Ue
9eIPUHvXiD1B8y8QeoPevEHqj3rxB6g968QeoPevEHqD3rxB6g+ZeIPVHzLxB6o+ZeIPVHzLxB6o
968QeoPevEHqD5l4g9Ue9eIPVHvXiD1R714g9Ue9eIPVHvXiD1R714g9Ue9eIPVHvXiD1R714g9U
e9eIPVHvXiD1R8y8Qer/AOy8QeqPevEHqD3rxB6g968QeoPmXiD1R8y8QeqPevEHqj3rxB6v3rxB
6v3rxB6o968Qer/7LxB6o968QeqPevEHqj3rxB6o968QeoPevEHqj3rxB6g968QeoPevEHqD3rxB
6g968QeoPevEHqf+y8QeoPevEnqD5l4k9QfMvEnqD5l4k9QfMvEnqD5l4k9QfMvEnqD5l4k9QfMv
EnqD5l4k9QfMvEnqD5l4k9QfMvEnqD5l4k9QfMvEnqD5l4k9QfMvEnqD5l4mXUHzLxMuoPmXiT1B
8y8SeoPmXiT1B8y8TLqD5l4mXUHzLxMuoPmXiT1B8y8SeoPmXiZdQfMvFS6g+ZeJl1B8y8TLqD5l
4mXUHzLxMuoPmXiZdQfMvEy6g+ZeIPUHzLxB6o+ZeIPVHvU49uTfjd5tnKjLPtf/AKW//9k=" transform="matrix(0.78 0 0 0.78 0.8506 0)">
		</image>
		<g>
			<defs>
				<path id="SVGID_3_" d="M292.84,277.519c0.917,1.105,1.374,2.355,1.374,3.736v11.565c0,1.047-0.323,1.897-0.975,2.558
					c-0.65,0.66-1.494,0.99-2.539,0.99h-12.59c-1.041,0-1.898-0.33-2.571-0.99s-1.006-1.511-1.006-2.558v-12.683h4.44v12.299h10.8
					v-11.915l-13.803-16.679c-0.958-1.152-1.438-2.415-1.438-3.802v-10.128c0-1.041,0.333-1.895,1.006-2.555s1.53-0.99,2.571-0.99
					h12.59c1.045,0,1.889,0.33,2.539,0.99c0.651,0.66,0.975,1.514,0.975,2.555v11.629h-4.44v-11.245h-10.8v10.544L292.84,277.519
					L292.84,277.519z M320.195,296.369v-24.025H308.15v24.025h-4.44v-50h4.44v21.979h12.045v-21.979h4.439v50H320.195
					L320.195,296.369z M350.01,296.369l-1.755-11.117h-9.938l-1.758,11.117h-4.282v-0.13l8.916-50h4.376l8.853,50.13H350.01
					L350.01,296.369z M343.301,254.227l-4.409,27.095h8.789L343.301,254.227L343.301,254.227z M379.603,296.369l-13.355-36.775
					v36.775h-4.183v-50h3.319l13.102,36.134v-36.134h4.184v50H379.603L379.603,296.369z M402.708,275.152v-3.831h9.807v21.5
					c0,1.047-0.33,1.897-0.99,2.558s-1.51,0.99-2.558,0.99H395.84c-1.048,0-1.897-0.33-2.558-0.99s-0.99-1.511-0.99-2.558v-42.907
					c0-1.041,0.33-1.895,0.99-2.555s1.51-0.99,2.558-0.99h13.127c1.048,0,1.897,0.33,2.558,0.99s0.99,1.514,0.99,2.555v11.629
					h-4.472v-11.245h-11.308v42.14h11.308v-17.285H402.708L402.708,275.152z M438.56,296.369v-24.025h-12.045v24.025h-4.44v-50h4.44
					v21.979h12.045v-21.979H443v50H438.56L438.56,296.369z M468.374,296.369l-1.755-11.117h-9.937l-1.756,11.117h-4.284v-0.13
					l8.915-50h4.377l8.852,50.13H468.374L468.374,296.369z M461.667,254.227l-4.411,27.095h8.788L461.667,254.227L461.667,254.227z
					 M480.682,296.369v-50h4.44v50H480.682L480.682,296.369z"/>
			</defs>
			<clipPath id="SVGID_4_">
				<use xlink:href="#SVGID_3_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_4_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAqUAAAKoAAACsT/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xABeAAEBAAAAAAAAAAAAAAAAAAAABwEBAAAAAAAAAAAAAAAAAAAAABABAQAAAAAAAAAAAAAAAAAA
sNARAQAAAAAAAAAAAAAAAAAAANASAQAAAAAAAAAAAAAAAAAAALD/2gAMAwEAAhEDEQAAAKAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAD/2gAIAQIAAQUAQ4//2gAIAQMAAQUAQ4//2gAIAQEAAQUAoQn/2gAIAQICBj8AQ4//2gAI
AQMCBj8AQ4//2gAIAQEBBj8AQhP/2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_5_" d="M501.325,290.113v4.044c0,0.333-0.111,0.606-0.327,0.815c-0.219,0.21-0.491,0.314-0.825,0.314h-4.104
					c-0.333,0-0.606-0.104-0.815-0.314c-0.213-0.209-0.317-0.482-0.317-0.815v-13.683c0-0.333,0.104-0.603,0.317-0.815
					c0.209-0.21,0.482-0.314,0.815-0.314h4.104c0.334,0,0.606,0.104,0.825,0.314c0.216,0.213,0.327,0.482,0.327,0.815v3.711h-1.428
					v-3.587h-3.546v13.435h3.546v-3.92H501.325L501.325,290.113z M509.577,295.287v-7.658h-3.837v7.658h-1.419v-15.942h1.419v7.011
					h3.837v-7.011h1.419v15.942H509.577L509.577,295.287z M514.144,295.287v-15.942h1.415v15.942H514.144L514.144,295.287z
					 M524.334,295.287l-4.259-11.725v11.725h-1.333v-15.942h1.057l4.177,11.524v-11.524h1.337v15.942H524.334L524.334,295.287z
					 M533.403,295.287l-0.559-3.545h-3.171l-0.559,3.545h-1.364v-0.038l2.84-15.945h1.396l2.822,15.983H533.403L533.403,295.287z
					 M531.263,281.852l-1.405,8.636h2.802L531.263,281.852L531.263,281.852z"/>
			</defs>
			<clipPath id="SVGID_6_">
				<use xlink:href="#SVGID_5_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_6_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAqUAAAKoAAACsT/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xABeAAEBAAAAAAAAAAAAAAAAAAAABwEBAAAAAAAAAAAAAAAAAAAAABABAQAAAAAAAAAAAAAAAAAA
sNARAQAAAAAAAAAAAAAAAAAAANASAQAAAAAAAAAAAAAAAAAAALD/2gAMAwEAAhEDEQAAAKAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAD/2gAIAQIAAQUAQ4//2gAIAQMAAQUAQ4//2gAIAQEAAQUAoQn/2gAIAQICBj8AQ4//2gAI
AQMCBj8AQ4//2gAIAQEBBj8AQhP/2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_7_" d="M507.713,257.429l-7.766,13.564h8.651v3.701h-13.708v-2.618l9.572-16.831v-5.221h-5.147v5.252h-4.136
					v-6.284c0-1.708,0.86-2.564,2.584-2.564h8.289c1.721,0,2.581,0.857,2.581,2.564v5.475
					C508.634,255.357,508.33,256.343,507.713,257.429L507.713,257.429z M522.996,274.695h-8.813c-1.676,0-2.514-0.863-2.514-2.584
					v-23.08c0-1.733,0.856-2.603,2.567-2.603h8.76c1.694,0,2.546,0.87,2.546,2.603v23.08
					C525.542,273.832,524.69,274.695,522.996,274.695L522.996,274.695z M521.406,250.06h-5.58v21.004h5.58V250.06L521.406,250.06z
					 M541.29,257.429l-7.767,13.564h8.652v3.701h-13.708v-2.618l9.572-16.831v-5.221h-5.148v5.252h-4.135v-6.284
					c0-1.708,0.859-2.564,2.583-2.564h8.29c1.721,0,2.58,0.857,2.58,2.564v5.475C542.209,255.357,541.905,256.343,541.29,257.429
					L541.29,257.429z M556.572,274.695h-8.817c-1.672,0-2.507-0.863-2.507-2.584v-23.08c0-1.733,0.854-2.603,2.564-2.603h8.76
					c1.698,0,2.545,0.87,2.545,2.603v23.08C559.117,273.832,558.27,274.695,556.572,274.695L556.572,274.695z M554.978,250.06
					h-5.579v21.004h5.579V250.06L554.978,250.06z"/>
			</defs>
			<clipPath id="SVGID_8_">
				<use xlink:href="#SVGID_7_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_8_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAtPAAALmwAADGL/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACaAAEBAQEAAAAAAAAAAAAAAAAABQMHAQEAAgMBAAAAAAAAAAAAAAAAAwUBBAYCEAAABAMJAQEB
AAAAAAAAAAAAAhIEATMUIDBQYKAyAzQVBcAREQABAgcBAAMAAAAAAAAAAAABADEwYHGxAnIDwCFB
wRIAAQEECQMFAAAAAAAAAAAAAQAgUBECoKGx4RIycpIzcQNzITFRYZH/2gAMAwEAAhEDEQAAAOgA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZa5R5kDkLAAAAADatJrX+qFpAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAy1yjzIHIWAAAAAG1aTWv9ULSAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABlrlHmQOQsAAAAANq0mtf6oW
kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADLV5zDXFRPDXBDXB
DXBDXBDXBJrG/EG14AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAA//2gAIAQIAAQUA0mXNKtfGkY5zSrXxpGOc0q18aRjhyqJ4sB4sB4sB4sB4sB4sAza03HqM
v//aAAgBAwABBQDSZF3WnG7HC7rTjdjhd1pxuxyEf5GpFSKkVIqRUjkOuOoy/9oACAEBAAEFAPyK
7mReNOznlzIvGnZzy5kXjTs55cyLxp2c8uZF407OeXMi8adnPLiQkwSYJMEmCTBJgkwSYJMEmCTB
Jg0hGo/R6//aAAgBAgIGPwCiZdzRNY3P5TYH73NE1jc/lNgfvc0TWNz+U2B+zS+2KUy/q5jsvXMd
l65jsvXMdl65jsvXMdl6MmLHimxRhD6pGf8A/9oACAEDAgY/AKJkOobGl+jqGxpfo6hsaX6D8LLW
stay1rLWstay1oGEPSFIz//aAAgBAQEGPwDyK/TUxcKm09dNTFwqbT101MXCptPXTUxcKm09dNTF
wqbT101MXCptPXTUpimKYpimKYpimKYpimKYrD4+/wA9Ht//2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_9_" d="M200.002,74.123V38.549h3.16v35.574H200.002L200.002,74.123z M221.108,74.123l-9.501-26.162v26.162
					h-2.979V38.549h2.364l9.318,25.708V38.549h2.98v35.574H221.108L221.108,74.123z M235.257,41.345v32.778h-3.159V41.345h-5.545
					v-2.796h14.206v2.796H235.257L235.257,41.345z M255.683,71.326v2.797h-11.684V38.549h11.545v2.796h-8.385V54.62h7.636v2.842
					h-7.636v13.863H255.683L255.683,71.326z M271.128,57.143c-0.229,0-0.563-0.011-1.003-0.035
					c-0.438-0.022-0.756-0.034-0.952-0.034c2.047,5.669,4.081,11.35,6.112,17.048h-3.316l-6.617-19.047l0.435-0.638h5.637V41.345
					h-8.115v32.778h-3.16V38.549h11.913c0.739,0,1.346,0.235,1.815,0.705s0.705,1.078,0.705,1.819v13.501
					C274.581,56.288,273.432,57.143,271.128,57.143L271.128,57.143z M292.437,60.712c0.65,0.787,0.978,1.674,0.978,2.658v8.228
					c0,0.743-0.231,1.349-0.691,1.819c-0.464,0.471-1.066,0.706-1.81,0.706h-8.956c-0.743,0-1.353-0.235-1.828-0.706
					c-0.477-0.47-0.718-1.076-0.718-1.819v-9.023h3.161v8.75h7.681v-8.478l-9.819-11.864c-0.68-0.819-1.022-1.72-1.022-2.708v-7.204
					c0-0.741,0.241-1.349,0.718-1.819c0.476-0.47,1.085-0.705,1.828-0.705h8.956c0.743,0,1.346,0.235,1.81,0.705
					c0.46,0.47,0.691,1.078,0.691,1.819v8.274h-3.161v-8.001h-7.681v7.501L292.437,60.712L292.437,60.712z M311.918,59.291
					c-0.479,0.479-1.089,0.717-1.832,0.717h-8.388v14.116h-3.158V38.549h11.546c0.743,0,1.353,0.235,1.832,0.705
					c0.476,0.47,0.717,1.078,0.717,1.819v16.39C312.635,58.203,312.394,58.814,311.918,59.291L311.918,59.291z M309.474,41.345
					h-7.775v15.844h7.775V41.345L309.474,41.345z M328.854,71.326v2.797h-11.687V38.549h11.547v2.796h-8.386V54.62h7.637v2.842
					h-7.637v13.863H328.854L328.854,71.326z M345.002,71.326v2.797h-11.684V38.549h11.547v2.796h-8.389V54.62h7.637v2.842h-7.637
					v13.863H345.002L345.002,71.326z M363.721,62.575v9.023c0,0.743-0.241,1.349-0.729,1.819c-0.482,0.471-1.099,0.706-1.841,0.706
					h-9.16c-0.742,0-1.346-0.235-1.815-0.706c-0.473-0.47-0.708-1.076-0.708-1.819V41.073c0-0.741,0.235-1.349,0.708-1.819
					c0.47-0.47,1.073-0.705,1.815-0.705h9.16c0.742,0,1.358,0.235,1.841,0.705c0.488,0.47,0.729,1.078,0.729,1.819v8.274h-3.18
					v-8.001h-7.916v29.98h7.916v-8.75H363.721L363.721,62.575z M380.507,74.123V57.031h-8.569v17.092h-3.157V38.549h3.157v15.639
					h8.569V38.549h3.161v35.574H380.507L380.507,74.123z"/>
			</defs>
			<clipPath id="SVGID_10_">
				<use xlink:href="#SVGID_9_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_10_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAveAAAMnwAADmn/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACPAAEBAQAAAAAAAAAAAAAAAAAABwQBAQEBAQAAAAAAAAAAAAAAAAADAgQQAAEDAgYDAAMBAAAA
AAAAAAABBRUTFFBwAgMENUAxEmCQsNARAAEFAAIDAAEFAAAAAAAAAAIA0QOTNAEEcLJ0IWCQ0BFB
EgACAgEEAwEAAAAAAAAAAAAAATEyEUBQIYFwcQKw/9oADAMBAAIRAxEAAACgAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEupiopcpmopcKilwq
KXCopcKilwqKXCopcKilwqKXCopcKilwqKXCopcKilwqKcUeWwxoAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAABJqzJuiIdMQAAAAAAAAAAAAANdOmNO5bhCoAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACTVmTdEQ6YgAAAAAAAAAAAAAa6dMady3CFQAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEmrMm6Ih0xAAAAAAAAAAAAAA106Y07luEKg
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJNWc1cS9UFMS9UBL1QEvVAS9UBL
1QEvVAS9UBL1QEvVAS9UBL1QEvVAS9UBL1QE6p2fRLYT2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB/9oA
CAECAAEFAMl9vbXWW6lupbqW6lupbqW6lupbqW6lupbqW6lupbqbm38LifG8Dke8T43gcj3ifG8D
ke8T2txNBcIXCFwhcIXCFwhcIXCFwhcIXCFwhcIXCFwhu7n2v8oD/9oACAEDAAEFAMl9ev5KyFZC
shWQrIVkKyFZCshWQrIVkKyFZCsho1/WKb3gbPrE97wNn1ie94Gz6xPXo+iipRUoqUVKKlFSipRU
oqUVKKlFSipRUoqaNHyn8oD/2gAIAQEAAQUA/WFIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBI
uBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuBIuA18/na3PIZp7XIZp7XIZp
7XIZp7XIZp7XIZp7XIZp7X8njm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8j
m8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm8jm808Dg6NX+S1f/9oACAECAgY/APC75xgsiyLI
siyLIsiyLIsiyLIsiyLISznO6fXWgXrdPrrQL1un11oF63R8ZyVKlSpUqVKlSpUqVKlRPGMflA//
2gAIAQMCBj8A8LriSCCCCCCCCCCCCCCCN0Xege6LvQPdF3oHui5gkkkkkkkkkkkkkkkfP5QP/9oA
CAEBAQY/AP2wtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1h
OtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hOtU1hO
tU1hOtU1hOtU1hOumB9mUhKeLgh5MueOeOT4/HP58D9L6IvcfA/S+iL3HwP0voi9x8D9L6IvcfA/
S+iL3HwP0voi9x8D9L6Ivcf1RlhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFll
hrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllhrFllh
rFllhrFllhrFllhrFllhrFllhrFlwYdaISHn+xLgB45454/3j8fxLX//2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_11_" d="M206.902,94.725c-0.705,0.77-1.646,1.154-2.823,1.154c-1.15,0-2.071-0.378-2.765-1.135
					c-0.693-0.755-1.041-1.739-1.041-2.953c0-1.305,0.355-2.346,1.062-3.12c0.708-0.773,1.665-1.162,2.876-1.162
					c1.123,0,2.031,0.379,2.717,1.136c0.686,0.755,1.029,1.739,1.029,2.952C207.957,92.911,207.606,93.952,206.902,94.725
					L206.902,94.725z M206.131,89.302c-0.482-0.568-1.145-0.853-1.983-0.853c-0.825,0-1.495,0.302-2.006,0.9
					c-0.515,0.6-0.771,1.385-0.771,2.358c0,0.979,0.252,1.765,0.752,2.354c0.498,0.592,1.152,0.887,1.957,0.887
					c0.864,0,1.544-0.283,2.036-0.851c0.493-0.565,0.739-1.357,0.739-2.374C206.854,90.678,206.613,89.872,206.131,89.302
					L206.131,89.302z M212.862,94.948c0.803,0,1.5-0.181,2.092-0.544v1.005c-0.595,0.313-1.344,0.47-2.242,0.47
					c-1.16,0-2.089-0.368-2.782-1.104c-0.693-0.738-1.039-1.714-1.039-2.925c0-1.303,0.391-2.352,1.171-3.147
					c0.781-0.793,1.772-1.193,2.974-1.193c0.773,0,1.413,0.111,1.919,0.33v1.093c-0.579-0.322-1.214-0.482-1.907-0.482
					c-0.903,0-1.638,0.302-2.206,0.903c-0.568,0.601-0.853,1.412-0.853,2.434c0,0.968,0.264,1.738,0.794,2.307
					C211.313,94.664,212.006,94.948,212.862,94.948L212.862,94.948z M219.022,88.575v7.168h-1.05v-7.168h-2.325v-0.93h5.713v0.93
					H219.022L219.022,88.575z M228.602,94.725c-0.705,0.77-1.646,1.154-2.823,1.154c-1.15,0-2.071-0.378-2.765-1.135
					c-0.693-0.755-1.041-1.739-1.041-2.953c0-1.305,0.354-2.346,1.062-3.12c0.708-0.773,1.665-1.162,2.876-1.162
					c1.125,0,2.031,0.379,2.717,1.136c0.688,0.755,1.03,1.739,1.03,2.952C229.657,92.911,229.305,93.952,228.602,94.725
					L228.602,94.725z M227.83,89.302c-0.482-0.568-1.143-0.853-1.983-0.853c-0.827,0-1.495,0.302-2.008,0.9
					c-0.513,0.6-0.768,1.385-0.768,2.358c0,0.979,0.251,1.765,0.749,2.354c0.501,0.592,1.153,0.887,1.958,0.887
					c0.865,0,1.544-0.283,2.038-0.851c0.49-0.565,0.737-1.357,0.737-2.374C228.554,90.678,228.313,89.872,227.83,89.302
					L227.83,89.302z M234.411,91.416v0.02c0.549,0.063,0.99,0.271,1.318,0.625c0.33,0.354,0.494,0.801,0.494,1.346
					c0,0.698-0.249,1.262-0.749,1.691c-0.5,0.429-1.14,0.646-1.92,0.646h-2.441v-8.098h2.378c0.711,0,1.278,0.17,1.704,0.511
					c0.424,0.339,0.638,0.789,0.638,1.351c0,0.447-0.125,0.839-0.379,1.182C235.201,91.03,234.854,91.271,234.411,91.416
					L234.411,91.416z M232.158,94.828h1.253c0.548,0,0.972-0.125,1.27-0.378c0.299-0.254,0.446-0.604,0.446-1.055
					c0-0.925-0.629-1.39-1.886-1.39h-1.083V94.828L232.158,94.828z M233.203,88.561h-1.045v2.536h0.952
					c0.504,0,0.901-0.124,1.188-0.37s0.432-0.585,0.432-1.021C234.73,88.942,234.223,88.561,233.203,88.561L233.203,88.561z
					 M242.077,94.816v0.927h-4.355v-8.098h4.172v0.93h-3.125v2.595h2.895v0.925h-2.895v2.721H242.077L242.077,94.816z
					 M246.689,91.994v0.028c0.224,0.094,0.414,0.227,0.576,0.401c0.163,0.175,0.372,0.472,0.631,0.894l1.516,2.426h-1.232
					l-1.343-2.247c-0.27-0.451-0.516-0.755-0.738-0.918c-0.224-0.162-0.488-0.243-0.8-0.243h-0.752v3.409h-1.048v-8.098h2.47
					c0.785,0,1.402,0.19,1.849,0.57c0.446,0.381,0.67,0.913,0.67,1.595C248.487,90.933,247.889,91.66,246.689,91.994L246.689,91.994
					z M246.981,88.917c-0.271-0.236-0.667-0.355-1.186-0.355h-1.249v2.859h1.231c0.479,0,0.866-0.14,1.163-0.419
					c0.299-0.277,0.447-0.644,0.447-1.097C247.389,89.483,247.253,89.153,246.981,88.917L246.981,88.917z M257.544,91.165
					c-0.271,0.482-0.782,1.098-1.535,1.844l-1.846,1.803v0.022h3.711v0.909h-4.923v-0.89l2.352-2.346
					c0.647-0.647,1.082-1.161,1.302-1.539c0.221-0.379,0.33-0.768,0.33-1.174c0-0.458-0.131-0.813-0.388-1.057
					c-0.257-0.246-0.632-0.37-1.118-0.37c-0.721,0-1.409,0.306-2.063,0.92v-1.041c0.637-0.492,1.379-0.738,2.225-0.738
					c0.729,0,1.303,0.2,1.726,0.592c0.42,0.394,0.631,0.924,0.631,1.588C257.946,90.192,257.813,90.684,257.544,91.165
					L257.544,91.165z M260.695,90.84c0.303-0.018,0.53-0.027,0.684-0.027c0.854,0,1.519,0.217,1.998,0.649
					c0.477,0.432,0.714,1.027,0.714,1.789c0,0.784-0.254,1.417-0.768,1.901c-0.511,0.484-1.219,0.727-2.122,0.727
					c-0.755,0-1.325-0.111-1.709-0.335v-1.046c0.545,0.355,1.111,0.533,1.698,0.533c0.559,0,1.009-0.155,1.349-0.467
					c0.343-0.311,0.513-0.727,0.513-1.241c0-0.519-0.175-0.922-0.521-1.208c-0.348-0.286-0.849-0.429-1.508-0.429
					c-0.317,0-0.742,0.024-1.28,0.071v-4.113h3.983v0.905h-3.031V90.84L260.695,90.84z M265.555,92.931v-0.79h3.078v0.79H265.555
					L265.555,92.931z M274.409,91.165c-0.272,0.482-0.78,1.098-1.536,1.844l-1.847,1.803v0.022h3.713v0.909h-4.926v-0.89
					l2.355-2.346c0.647-0.647,1.079-1.161,1.301-1.539c0.219-0.379,0.33-0.768,0.33-1.174c0-0.458-0.13-0.813-0.387-1.057
					c-0.261-0.246-0.632-0.37-1.121-0.37c-0.72,0-1.409,0.306-2.063,0.92v-1.041c0.638-0.492,1.381-0.738,2.225-0.738
					c0.73,0,1.302,0.2,1.727,0.592c0.422,0.394,0.632,0.924,0.632,1.588C274.813,90.192,274.676,90.684,274.409,91.165
					L274.409,91.165z M280.271,94.67c-0.584,0.805-1.394,1.208-2.428,1.208c-0.571,0-1.051-0.092-1.444-0.276v-0.957
					c0.444,0.257,0.927,0.386,1.444,0.386c0.724,0,1.285-0.286,1.682-0.857c0.4-0.571,0.6-1.382,0.6-2.434
					c-0.006,0.006-0.016,0.006-0.021,0c-0.355,0.674-0.952,1.014-1.79,1.014c-0.68,0-1.244-0.233-1.688-0.701
					c-0.444-0.465-0.67-1.074-0.67-1.819c0-0.795,0.244-1.446,0.733-1.958c0.485-0.509,1.117-0.767,1.897-0.767
					c0.803,0,1.435,0.317,1.886,0.951c0.45,0.631,0.679,1.552,0.679,2.761C281.15,92.717,280.858,93.868,280.271,94.67
					L280.271,94.67z M279.637,88.934c-0.289-0.381-0.667-0.571-1.137-0.571c-0.435,0-0.796,0.165-1.082,0.495
					c-0.285,0.332-0.429,0.746-0.429,1.246c0,0.544,0.137,0.973,0.419,1.293c0.279,0.319,0.664,0.478,1.152,0.478
					c0.429,0,0.784-0.145,1.073-0.435c0.292-0.292,0.435-0.644,0.435-1.06C280.068,89.797,279.926,89.316,279.637,88.934
					L279.637,88.934z M282.389,97.086h-0.959l3.885-9.441h0.955L282.389,97.086L282.389,97.086z M294.43,88.747
					c-0.311,0.208-0.47,0.495-0.47,0.865c0,0.329,0.108,0.594,0.324,0.8c0.219,0.205,0.686,0.486,1.409,0.843
					c0.793,0.377,1.352,0.758,1.666,1.139c0.317,0.378,0.473,0.807,0.473,1.279c0,0.711-0.254,1.257-0.771,1.636
					c-0.517,0.379-1.231,0.57-2.148,0.57c-0.317,0-0.691-0.045-1.12-0.134s-0.739-0.2-0.934-0.332v-1.192
					c0.248,0.218,0.581,0.394,0.994,0.533c0.415,0.138,0.806,0.21,1.181,0.21c1.136,0,1.704-0.403,1.704-1.214
					c0-0.225-0.061-0.43-0.181-0.611c-0.124-0.181-0.292-0.341-0.502-0.481c-0.216-0.138-0.615-0.354-1.199-0.643
					c-0.813-0.403-1.346-0.779-1.604-1.126c-0.257-0.346-0.39-0.743-0.39-1.189c0-0.673,0.272-1.206,0.813-1.6
					c0.539-0.392,1.219-0.592,2.044-0.592c0.803,0,1.396,0.098,1.771,0.293v1.14c-0.488-0.336-1.104-0.506-1.851-0.506
					C295.144,88.436,294.741,88.541,294.43,88.747L294.43,88.747z M304.449,95.743v-3.648h-4.097v3.648h-1.048v-8.098h1.048v3.519
					h4.097v-3.519h1.045v8.098H304.449L304.449,95.743z M312.686,95.743l-0.825-2.217h-3.374l-0.777,2.217h-1.155l3.085-8.098h1.117
					l3.085,8.098H312.686L312.686,95.743z M310.312,89.247c-0.038-0.108-0.082-0.297-0.127-0.565h-0.025
					c-0.038,0.244-0.082,0.432-0.13,0.565l-1.213,3.364h2.721L310.312,89.247L310.312,89.247z M320.277,95.743l-4.072-6.298
					c-0.117-0.179-0.206-0.354-0.275-0.528h-0.032c0.025,0.183,0.041,0.559,0.041,1.135v5.692h-1.034v-8.098h1.311l3.961,6.189
					c0.19,0.292,0.302,0.48,0.337,0.56h0.019c-0.035-0.23-0.051-0.624-0.051-1.181v-5.569h1.035v8.098H320.277L320.277,95.743z
					 M326.813,92.401v-0.924h2.809v3.71c-0.813,0.46-1.72,0.69-2.739,0.69c-1.171,0-2.116-0.37-2.834-1.111
					c-0.714-0.739-1.072-1.728-1.072-2.966c0-1.25,0.394-2.279,1.187-3.083c0.794-0.805,1.806-1.209,3.044-1.209
					c0.873,0,1.615,0.141,2.219,0.419v1.128c-0.647-0.408-1.419-0.611-2.314-0.611c-0.882,0-1.608,0.302-2.18,0.905
					c-0.571,0.604-0.857,1.398-0.857,2.383c0,1.014,0.264,1.805,0.79,2.368c0.524,0.564,1.238,0.847,2.133,0.847
					c0.619,0,1.14-0.119,1.571-0.355v-2.191H326.813L326.813,92.401z M336.496,95.743v-3.648h-4.098v3.648h-1.048v-8.098h1.048
					v3.519h4.098v-3.519h1.047v8.098H336.496L336.496,95.743z M344.729,95.743l-0.825-2.217h-3.37l-0.778,2.217h-1.158l3.085-8.098
					h1.117l3.085,8.098H344.729L344.729,95.743z M342.357,89.247c-0.038-0.108-0.079-0.297-0.127-0.565h-0.025
					c-0.038,0.244-0.079,0.432-0.13,0.565l-1.212,3.364h2.717L342.357,89.247L342.357,89.247z M348.775,94.859v0.884h-2.605v-0.884
					h0.78v-6.33h-0.78v-0.884h2.605v0.884h-0.777v6.33H348.775L348.775,94.859z M352.968,97.227h-0.742l0.647-2.785h0.986
					L352.968,97.227L352.968,97.227z M362.029,94.948c0.803,0,1.498-0.181,2.092-0.544v1.005c-0.597,0.313-1.343,0.47-2.241,0.47
					c-1.161,0-2.088-0.368-2.78-1.104c-0.695-0.738-1.041-1.714-1.041-2.925c0-1.303,0.388-2.352,1.168-3.147
					c0.781-0.793,1.771-1.193,2.978-1.193c0.774,0,1.409,0.111,1.917,0.33v1.093c-0.578-0.322-1.216-0.482-1.911-0.482
					c-0.901,0-1.635,0.302-2.202,0.903c-0.571,0.601-0.854,1.412-0.854,2.434c0,0.968,0.267,1.738,0.797,2.307
					C360.478,94.664,361.172,94.948,362.029,94.948L362.029,94.948z M370.776,95.743v-3.648h-4.098v3.648h-1.047v-8.098h1.047v3.519
					h4.098v-3.519h1.047v8.098H370.776L370.776,95.743z M375.629,94.859v0.884h-2.603v-0.884h0.781v-6.33h-0.781v-0.884h2.603v0.884
					h-0.777v6.33H375.629L375.629,94.859z M382.218,95.743l-4.068-6.298c-0.118-0.179-0.21-0.354-0.279-0.528h-0.032
					c0.025,0.183,0.041,0.559,0.041,1.135v5.692h-1.031v-8.098h1.308l3.964,6.189c0.188,0.292,0.302,0.48,0.333,0.56h0.022
					c-0.034-0.23-0.054-0.624-0.054-1.181v-5.569h1.035v8.098H382.218L382.218,95.743z M390.647,95.743l-0.825-2.217h-3.37
					l-0.781,2.217h-1.155l3.085-8.098h1.117l3.085,8.098H390.647L390.647,95.743z M388.273,89.247
					c-0.038-0.108-0.079-0.297-0.127-0.565h-0.025c-0.038,0.244-0.079,0.432-0.13,0.565l-1.209,3.364h2.717L388.273,89.247
					L388.273,89.247z"/>
			</defs>
			<clipPath id="SVGID_12_">
				<use xlink:href="#SVGID_11_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_12_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAsuAAAL8AAADWf/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACHAAEBAQEBAQAAAAAAAAAAAAAABwYEAwEBAQEAAAAAAAAAAAAAAAAAAAABEAABBQABAwQDAAAA
AAAAAAAAEQQUBRY1QGABULACBoCg0BEAAAUEAQQBBAMAAAAAAAAAAAECkgPRMtIzBGARsnRQIZES
IqDQYRIBAAAAAAAAAAAAAAAAAAAAsP/aAAwDAQACEQMRAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAABizaMIN2wg3bCDdsIN2wg3bCDdsIN2wg3bCDdsIN2wg3bCDdsIN2wg3bCDds
J7m0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmFPmC8ggAAAAAAAAAAAAA
B18nWU8WAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJhT/IlSqFlaqCVqoJW
qglaqCVqoJWqglaqCVqoJWqglaqCVqoJWqglaqCVqoJX10n6eoQAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAD//2gAIAQIAAQUA9w4//9oACAEDAAEFAPyoQQQQQQQQQQQQQQQT1Tx2Kooo
oooooooooooovs/P/9oACAEBAAEFAP2+n/221bvtnbmztzZ25s7c2dubO3NnbmztzZ25s7c2dubO
3NnbmztzZ25s7c2dubO3NnbmztzZ25s7c2dubO3NnbmztzZ25s7c2dubO3NnbjD7baOH3c1tyvX1
HK9zW3K9fUcr3Nbcr19Ryvc3lq2+XmI0IjQiNCI0IjQiNCI0IjQiNCI0IjQiNCI0IjQiNCI0IjQi
NCI0IjQiNCI0IjQiNCI0IjQiNCI0IjQiNCI0PDVt8fP8dN//2gAIAQICBj8AQ4//2gAIAQMCBj8A
Q4//2gAIAQEBBj8A/l9cnjxlF+EUq4090mZ9kqNJd/2/wWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIW
wsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWw
sPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIWwsPIcbjyFF+E0qI1dkn37KUST7ft1Pzf
Yl81fAcL2IvNPU/N9iXzV8BwvYi809T832JfNXwHC9iLzT1OalQoMz+pmaS7mf2GiNiaDRGxNBoj
Ymg0RsTQaI2JoNEbE0GiNiaDRGxNBojYmg0RsTQaI2JoNEbE0GiNiaDRGxNBojYmg0RsTQaI2JoN
EbE0GiNiaDRGxNBojYmg0RsTQaI2JoNEbE0GiNiaDRGxNBojYmg0RsTQaI2JoNEbE0GiNiaAlJhQ
Rl9SMkl3I/t/Tp3/2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_13_" d="M201.992,102.397c-0.314,0.208-0.468,0.495-0.468,0.865c0,0.327,0.105,0.593,0.323,0.798
					c0.214,0.207,0.684,0.486,1.404,0.843c0.797,0.379,1.351,0.76,1.668,1.14c0.319,0.379,0.477,0.806,0.477,1.279
					c0,0.712-0.258,1.258-0.773,1.636c-0.518,0.381-1.231,0.57-2.148,0.57c-0.32,0-0.693-0.043-1.122-0.132
					c-0.425-0.089-0.738-0.2-0.932-0.332v-1.192c0.248,0.216,0.579,0.394,0.992,0.533c0.414,0.138,0.81,0.209,1.181,0.209
					c1.138,0,1.706-0.405,1.706-1.214c0-0.227-0.063-0.43-0.185-0.613c-0.122-0.179-0.289-0.339-0.501-0.479
					c-0.213-0.14-0.613-0.354-1.197-0.643c-0.813-0.405-1.347-0.779-1.605-1.127c-0.257-0.346-0.386-0.743-0.386-1.188
					c0-0.673,0.27-1.208,0.811-1.6c0.54-0.394,1.221-0.592,2.044-0.592c0.804,0,1.394,0.098,1.771,0.293v1.139
					c-0.489-0.338-1.106-0.506-1.851-0.506C202.706,102.086,202.303,102.189,201.992,102.397L201.992,102.397z M212.011,109.392
					v-3.648h-4.097v3.648h-1.047v-8.096h1.047v3.518h4.097v-3.518h1.047v8.096H212.011L212.011,109.392z M220.246,109.392
					l-0.826-2.215h-3.371l-0.779,2.215h-1.155l3.085-8.096h1.117l3.085,8.096H220.246L220.246,109.392z M217.872,102.896
					c-0.038-0.106-0.079-0.297-0.125-0.563h-0.025c-0.038,0.243-0.083,0.43-0.13,0.563l-1.213,3.366h2.717L217.872,102.896
					L217.872,102.896z M227.837,109.392l-4.07-6.298c-0.114-0.178-0.208-0.352-0.278-0.527h-0.03
					c0.027,0.181,0.042,0.559,0.042,1.133v5.692h-1.035v-8.096h1.313l3.961,6.189c0.188,0.292,0.302,0.479,0.336,0.56h0.02
					c-0.035-0.231-0.053-0.625-0.053-1.182v-5.567h1.036v8.096H227.837L227.837,109.392z M234.375,106.052v-0.925h2.806v3.712
					c-0.808,0.46-1.721,0.689-2.739,0.689c-1.17,0-2.114-0.368-2.829-1.109c-0.716-0.741-1.075-1.73-1.075-2.967
					c0-1.251,0.396-2.279,1.188-3.082c0.79-0.805,1.805-1.209,3.042-1.209c0.874,0,1.615,0.14,2.222,0.419v1.128
					c-0.647-0.408-1.42-0.611-2.316-0.611c-0.881,0-1.608,0.301-2.18,0.905c-0.571,0.603-0.856,1.396-0.856,2.383
					c0,1.014,0.262,1.804,0.79,2.368c0.525,0.565,1.234,0.846,2.132,0.846c0.617,0,1.141-0.117,1.574-0.354v-2.192H234.375
					L234.375,106.052z M244.058,109.392v-3.648h-4.099v3.648h-1.046v-8.096h1.046v3.518h4.099v-3.518h1.045v8.096H244.058
					L244.058,109.392z M252.293,109.392l-0.827-2.215h-3.371l-0.777,2.215h-1.156l3.083-8.096h1.119l3.083,8.096H252.293
					L252.293,109.392z M249.92,102.896c-0.039-0.106-0.081-0.297-0.127-0.563h-0.025c-0.038,0.243-0.081,0.43-0.13,0.563
					l-1.213,3.366h2.717L249.92,102.896L249.92,102.896z M256.334,108.508v0.884h-2.603v-0.884h0.779v-6.33h-0.779v-0.882h2.603
					v0.882h-0.778v6.33H256.334L256.334,108.508z M265.494,108.508v0.884h-2.603v-0.884h0.779v-6.33h-0.779v-0.882h2.603v0.882
					h-0.777v6.33H265.494L265.494,108.508z M272.083,109.392l-4.072-6.298c-0.114-0.178-0.206-0.352-0.276-0.527h-0.028
					c0.025,0.181,0.038,0.559,0.038,1.133v5.692h-1.034v-8.096h1.313l3.961,6.189c0.188,0.292,0.302,0.479,0.337,0.56h0.019
					c-0.035-0.231-0.051-0.625-0.051-1.182v-5.567h1.035v8.096H272.083L272.083,109.392z M277.862,102.226v7.166h-1.051v-7.166
					h-2.326v-0.93h5.713v0.93H277.862L277.862,102.226z M285.689,108.467v0.925h-4.354v-8.096h4.173v0.93h-3.126v2.593h2.895v0.925
					h-2.895v2.723H285.689L285.689,108.467z M290.304,105.644v0.027c0.223,0.095,0.413,0.227,0.578,0.403
					c0.158,0.174,0.368,0.471,0.628,0.893l1.518,2.425h-1.235l-1.342-2.247c-0.267-0.449-0.515-0.755-0.736-0.917
					c-0.223-0.162-0.492-0.244-0.8-0.244h-0.756v3.408h-1.044v-8.096h2.466c0.787,0,1.406,0.189,1.854,0.57
					c0.444,0.381,0.667,0.912,0.667,1.595C292.101,104.583,291.5,105.311,290.304,105.644L290.304,105.644z M290.596,102.567
					c-0.272-0.236-0.666-0.355-1.187-0.355h-1.251v2.86h1.234c0.479,0,0.867-0.141,1.165-0.419c0.299-0.279,0.444-0.645,0.444-1.098
					C291.002,103.132,290.869,102.803,290.596,102.567L290.596,102.567z M299.368,109.392l-4.068-6.298
					c-0.118-0.178-0.21-0.352-0.279-0.527h-0.032c0.028,0.181,0.041,0.559,0.041,1.133v5.692h-1.031v-8.096h1.311l3.958,6.189
					c0.19,0.292,0.302,0.479,0.337,0.56h0.021c-0.034-0.231-0.054-0.625-0.054-1.182v-5.567h1.035v8.096H299.368L299.368,109.392z
					 M307.798,109.392l-0.825-2.215h-3.37l-0.781,2.215h-1.155l3.085-8.096h1.117l3.085,8.096H307.798L307.798,109.392z
					 M305.424,102.896c-0.038-0.106-0.079-0.297-0.127-0.563h-0.025c-0.038,0.243-0.082,0.43-0.13,0.563l-1.213,3.366h2.721
					L305.424,102.896L305.424,102.896z M312.571,102.226v7.166h-1.051v-7.166h-2.326v-0.93h5.713v0.93H312.571L312.571,102.226z
					 M317.868,108.508v0.884h-2.603v-0.884h0.778v-6.33h-0.778v-0.882h2.603v0.882h-0.777v6.33H317.868L317.868,108.508z
					 M325.19,108.374c-0.704,0.77-1.644,1.154-2.824,1.154c-1.149,0-2.069-0.376-2.765-1.133c-0.695-0.755-1.038-1.741-1.038-2.953
					c0-1.306,0.353-2.347,1.061-3.12c0.704-0.774,1.663-1.161,2.875-1.161c1.124,0,2.031,0.379,2.717,1.136
					c0.686,0.756,1.028,1.739,1.028,2.952C326.244,106.561,325.896,107.602,325.19,108.374L325.19,108.374z M324.419,102.953
					c-0.482-0.568-1.146-0.852-1.983-0.852c-0.825,0-1.495,0.3-2.006,0.9c-0.514,0.6-0.768,1.384-0.768,2.358
					c0,0.979,0.247,1.763,0.749,2.354c0.501,0.592,1.148,0.885,1.955,0.885c0.863,0,1.542-0.281,2.037-0.849
					c0.492-0.567,0.739-1.358,0.739-2.376C325.143,104.329,324.901,103.522,324.419,102.953L324.419,102.953z M333.074,109.392
					l-4.072-6.298c-0.114-0.178-0.206-0.352-0.275-0.527h-0.032c0.025,0.181,0.041,0.559,0.041,1.133v5.692h-1.034v-8.096h1.311
					l3.961,6.189c0.19,0.292,0.305,0.479,0.337,0.56h0.019c-0.031-0.231-0.051-0.625-0.051-1.182v-5.567h1.035v8.096H333.074
					L333.074,109.392z M341.504,109.392l-0.825-2.215h-3.37l-0.781,2.215h-1.155l3.085-8.096h1.117l3.085,8.096H341.504
					L341.504,109.392z M339.13,102.896c-0.038-0.106-0.079-0.297-0.127-0.563h-0.025c-0.038,0.243-0.082,0.43-0.13,0.563
					l-1.216,3.366h2.724L339.13,102.896L339.13,102.896z M343.723,109.392v-8.096h1.044v7.171h3.225v0.925H343.723L343.723,109.392z
					 M358.681,108.597c0.8,0,1.498-0.179,2.089-0.544v1.006c-0.597,0.313-1.343,0.468-2.241,0.468c-1.161,0-2.088-0.367-2.78-1.104
					c-0.695-0.736-1.041-1.712-1.041-2.925c0-1.303,0.391-2.352,1.171-3.147c0.778-0.793,1.771-1.191,2.975-1.191
					c0.774,0,1.412,0.111,1.917,0.33v1.093c-0.578-0.322-1.216-0.482-1.908-0.482c-0.901,0-1.638,0.3-2.205,0.903
					c-0.568,0.6-0.851,1.412-0.851,2.433c0,0.968,0.263,1.738,0.793,2.308C357.129,108.313,357.824,108.597,358.681,108.597
					L358.681,108.597z M368.387,108.374c-0.702,0.77-1.645,1.154-2.818,1.154c-1.152,0-2.076-0.376-2.768-1.133
					c-0.695-0.755-1.041-1.741-1.041-2.953c0-1.306,0.355-2.347,1.06-3.12c0.711-0.774,1.666-1.161,2.876-1.161
					c1.126,0,2.031,0.379,2.72,1.136c0.686,0.756,1.031,1.739,1.031,2.952C369.446,106.561,369.091,107.602,368.387,108.374
					L368.387,108.374z M367.618,102.953c-0.482-0.568-1.146-0.852-1.986-0.852c-0.825,0-1.492,0.3-2.006,0.9
					c-0.512,0.6-0.769,1.384-0.769,2.358c0,0.979,0.248,1.763,0.749,2.354c0.502,0.592,1.152,0.885,1.962,0.885
					c0.863,0,1.539-0.281,2.031-0.849c0.495-0.567,0.742-1.358,0.742-2.376C368.342,104.329,368.101,103.522,367.618,102.953
					L367.618,102.953z M376.273,109.392l-4.072-6.298c-0.114-0.178-0.209-0.352-0.279-0.527h-0.031
					c0.031,0.181,0.044,0.559,0.044,1.133v5.692H370.9v-8.096h1.311l3.964,6.189c0.188,0.292,0.299,0.479,0.333,0.56h0.022
					c-0.038-0.231-0.054-0.625-0.054-1.182v-5.567h1.038v8.096H376.273L376.273,109.392z M382.621,109.392h-1.162l-2.917-8.096
					h1.162l2.181,6.33c0.073,0.209,0.127,0.451,0.161,0.725h0.025c0.029-0.239,0.09-0.485,0.185-0.736l2.222-6.319h1.117
					L382.621,109.392L382.621,109.392z M390.98,108.467v0.925h-4.354v-8.096h4.174v0.93h-3.126v2.593h2.895v0.925h-2.895v2.723
					H390.98L390.98,108.467z M397.776,109.392l-4.069-6.298c-0.114-0.178-0.21-0.352-0.279-0.527h-0.028
					c0.028,0.181,0.041,0.559,0.041,1.133v5.692h-1.035v-8.096h1.312l3.964,6.189c0.184,0.292,0.299,0.479,0.333,0.56h0.022
					c-0.038-0.231-0.054-0.625-0.054-1.182v-5.567h1.034v8.096H397.776L397.776,109.392z M403.559,102.226v7.166h-1.054v-7.166
					h-2.323v-0.93h5.713v0.93H403.559L403.559,102.226z M408.853,108.508v0.884h-2.603v-0.884h0.78v-6.33h-0.78v-0.882h2.603v0.882
					h-0.777v6.33H408.853L408.853,108.508z M416.175,108.374c-0.705,0.77-1.645,1.154-2.822,1.154c-1.148,0-2.072-0.376-2.768-1.133
					c-0.691-0.755-1.037-1.741-1.037-2.953c0-1.306,0.352-2.347,1.063-3.12c0.705-0.774,1.664-1.161,2.873-1.161
					c1.127,0,2.031,0.379,2.72,1.136c0.686,0.756,1.028,1.739,1.028,2.952C417.231,106.561,416.879,107.602,416.175,108.374
					L416.175,108.374z M415.403,102.953c-0.482-0.568-1.143-0.852-1.98-0.852c-0.828,0-1.498,0.3-2.009,0.9
					c-0.511,0.6-0.769,1.384-0.769,2.358c0,0.979,0.251,1.763,0.749,2.354c0.502,0.592,1.152,0.885,1.958,0.885
					c0.863,0,1.543-0.281,2.035-0.849c0.492-0.567,0.739-1.358,0.739-2.376C416.127,104.329,415.886,103.522,415.403,102.953
					L415.403,102.953z"/>
			</defs>
			<clipPath id="SVGID_14_">
				<use xlink:href="#SVGID_13_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_14_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAsiAAAL7gAADVj/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACDAAEBAQEBAQAAAAAAAAAAAAAABwUEAQIBAQEAAAAAAAAAAAAAAAAAAAABEAACAwEAAAUFAAAA
AAAAAAAABQQUFhFQYLADNYDQAQIGEQABBAMAAAYCAwAAAAAAAAADAAECktEyM2ARErIEdFBhsNAT
EgEAAAAAAAAAAAAAAAAAAACw/9oADAMBAAIRAxEAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAE1KUlZaolYqiViqJWKolYqiViqJWKolYqiViqJWKolYqiViqJWKolYqiViqJWKolYqiV9RSgg
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACYU+YLyCAAAAAAAAAAAAAAAAHXydZ
TxYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxtnwxG4MNuDDbgw24MNuDDbgw
24MNuDDbgw24MNuDDbgw24MNuDDbgw24MNuDD+9kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAf/2gAIAQIAAQUA9Q4//9oACAEDAAEFAPrs4cOHDhw4cOHDhw4cOHDhzxL8ekE/
/9oACAEBAAEFAPQCGsqT+rS3LLcstyy3LLcstyy3LLcstyy3LLcstyy3LLcstyy3LLcstyy3LLcs
tyy3LLcstyy3LLcstyy3LLcstyy3LLcstyy3LLcsVSpP7NPMTb5XwNR8r5ibfK+BqPlfMXv/AMqp
ke/j0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj
0pj0pj0pj0pj0pj0pj0pj0pj0pj0pj0p7H8qpj+/9mwv/9oACAECAgY/AEOP/9oACAEDAgY/AEOP
/9oACAEBAQY/AP4Aj5kYmmzMcrMzSfyZvW/7Xcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5Z
Xcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZX
cl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXcl5ZXw4yNN2c4mdnk/k7e
uP78R/N+wX3y/CfC+wL3x8R/N+wX3y/CfC+wL3x8RkOSM/8AQspTn5Tdm9Un9TrUl3WpLutSXdak
u61Jd1qS7rUl3WpLutSXdaku61Jd1qS7rUl3WpLutSXdaku61Jd1qS7rUl3WpLutSXdaku61Jd1q
S7rUl3WpLutSXdaku61Jd1qS7rUl3WpLutSXdaku61Jd0M44z/0FKM4ec3dvVF/U39Nh/wD/2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_15_" d="M424.062,109.392l-4.076-6.298c-0.114-0.178-0.206-0.352-0.276-0.527h-0.028
					c0.025,0.181,0.038,0.559,0.038,1.133v5.692h-1.035v-8.096h1.314l3.961,6.189c0.188,0.292,0.299,0.479,0.336,0.56h0.02
					c-0.035-0.231-0.051-0.625-0.051-1.182v-5.567h1.035v8.096H424.062L424.062,109.392z M436.853,108.597
					c0.803,0,1.498-0.179,2.091-0.544v1.006c-0.597,0.313-1.346,0.468-2.243,0.468c-1.159,0-2.086-0.367-2.777-1.104
					c-0.695-0.736-1.041-1.712-1.041-2.925c0-1.303,0.387-2.352,1.168-3.147c0.78-0.793,1.773-1.191,2.974-1.191
					c0.777,0,1.415,0.111,1.92,0.33v1.093c-0.577-0.322-1.216-0.482-1.907-0.482c-0.904,0-1.638,0.3-2.206,0.903
					c-0.567,0.6-0.854,1.412-0.854,2.433c0,0.968,0.267,1.738,0.797,2.308C435.304,108.313,435.996,108.597,436.853,108.597
					L436.853,108.597z M444.809,108.467v0.925h-4.351v-8.096h4.17v0.93h-3.126v2.593h2.895v0.925h-2.895v2.723H444.809
					L444.809,108.467z M451.607,109.392l-4.071-6.298c-0.114-0.178-0.206-0.352-0.276-0.527h-0.028
					c0.025,0.181,0.041,0.559,0.041,1.133v5.692h-1.038v-8.096h1.314l3.96,6.189c0.188,0.292,0.299,0.479,0.333,0.56h0.022
					c-0.035-0.231-0.054-0.625-0.054-1.182v-5.567h1.038v8.096H451.607L451.607,109.392z M457.386,102.226v7.166h-1.051v-7.166
					h-2.326v-0.93h5.713v0.93H457.386L457.386,102.226z M465.213,108.467v0.925h-4.352v-8.096h4.167v0.93h-3.123v2.593h2.895v0.925
					h-2.895v2.723H465.213L465.213,108.467z M469.825,105.644v0.027c0.225,0.095,0.419,0.227,0.58,0.403
					c0.162,0.174,0.372,0.471,0.629,0.893l1.518,2.425h-1.235l-1.343-2.247c-0.27-0.449-0.514-0.755-0.736-0.917
					c-0.225-0.162-0.491-0.244-0.803-0.244h-0.752v3.408h-1.044v-8.096h2.466c0.79,0,1.406,0.189,1.851,0.57
					c0.447,0.381,0.669,0.912,0.669,1.595C471.624,104.583,471.024,105.311,469.825,105.644L469.825,105.644z M470.12,102.567
					c-0.273-0.236-0.666-0.355-1.188-0.355h-1.25v2.86h1.234c0.477,0,0.867-0.141,1.165-0.419c0.295-0.279,0.444-0.645,0.444-1.098
					C470.526,103.132,470.389,102.803,470.12,102.567L470.12,102.567z"/>
			</defs>
			<clipPath id="SVGID_16_">
				<use xlink:href="#SVGID_15_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 -3.078952e-008 0)" clip-path="url(#SVGID_16_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAArjAAALQgAAC/n/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACEAAEBAQEBAAAAAAAAAAAAAAAABQcEAwEBAQAAAAAAAAAAAAAAAAAAAAEQAAEDAgYDAQAAAAAA
AAAAAAAEFAURFmCwAgM1BjCAAdARAAAFAwIFBQEAAAAAAAAAAAABApID0TIzEQQwYCESsrDQMXJ0
wRIBAAAAAAAAAAAAAAAAAAAAsP/aAAwDAQACEQMRAAAA0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAA5DrZWXVGVjVGVjVGVjVGVjVEymgAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADj7OMzESgAAAaNUl1LAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHn6CGuCGuCGuCGuCGuDw9wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAA//9oACAECAAEFAMw4/9oACAEDAAEFAPf2hQoUKZRv/9oACAEB
AAEFAMgqlfv3TFu1Y7VjtWO1Y7VjtWO1Y7VjtWO1Z1zVq1wuO5fivF1rg8dy/FeLrXB4739nQo2L
PhSz4Us+FLPhSz4Us+FLPhSz4Us+FLPhRGk2Uab8a5//2gAIAQICBj8AQ4//2gAIAQMCBj8AQ4//
2gAIAQEBBj8A9Aq3iknoZQSmRl8kfYoZ5HqqM8j1VGeR6qjPI9VRnkeqozyPVUZ5HqqM8j1VGeR6
qjPI9VRtVLM1KNKtTM9TuVz5vfzy+CuHtPqrzVz5vfzy+CuHtPqrzVz5JBJjlSpC9Oh9qi7TFsjz
FsjzFsjzFsjzFsjzFsjzFsjzFsjzFsjzFsjzCNtBqUUZGSSM9T6nr/fZrr//2Q==" transform="matrix(0.78 0 0 0.78 0.8506 0)">
				</image>
			</g>
		</g>
		<g>
			<defs>
				<path id="SVGID_17_" d="M409.659,51.516l-9.795,17.102h10.912v4.668h-17.285v-3.302l12.066-21.225v-6.582h-6.49v6.628h-5.215
					v-7.927c0-2.155,1.086-3.234,3.257-3.234h10.454c2.172,0,3.257,1.079,3.257,3.234v6.901
					C410.82,48.904,410.433,50.148,409.659,51.516L409.659,51.516z M428.924,73.287h-11.112c-2.11,0-3.168-1.085-3.168-3.256V40.925
					c0-2.185,1.079-3.28,3.234-3.28h11.045c2.143,0,3.212,1.095,3.212,3.28v29.106C432.136,72.202,431.067,73.287,428.924,73.287
					L428.924,73.287z M426.921,42.223h-7.037v26.485h7.037V42.223L426.921,42.223z M451.994,51.516l-9.79,17.102h10.907v4.668
					h-17.287v-3.302l12.07-21.225v-6.582h-6.487v6.628h-5.218v-7.927c0-2.155,1.086-3.234,3.257-3.234H449.9
					c2.17,0,3.256,1.079,3.256,3.234v6.901C453.156,48.904,452.768,50.148,451.994,51.516L451.994,51.516z M471.259,73.287h-11.112
					c-2.11,0-3.164-1.085-3.164-3.256V40.925c0-2.185,1.076-3.28,3.234-3.28h11.042c2.142,0,3.212,1.095,3.212,3.28v29.106
					C474.471,72.202,473.401,73.287,471.259,73.287L471.259,73.287z M469.256,42.223h-7.036v26.485h7.036V42.223L469.256,42.223z"/>
			</defs>
			<clipPath id="SVGID_18_">
				<use xlink:href="#SVGID_17_"  overflow="visible"/>
			</clipPath>
			<g transform="matrix(1 0 0 1 0 0)" clip-path="url(#SVGID_18_)">
				
					<image overflow="visible" width="1080" height="735" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEAXABcAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAAuXAAAMDwAADUn/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIAt8ENgMBIgACEQEDEQH/
xACdAAEBAQEBAAAAAAAAAAAAAAAABQIBBwEBAAIDAQAAAAAAAAAAAAAAAAQFAQIDBhAAAQIEBQIG
AwAAAAAAAAAAAAEDAhMzFDBQYAQVIDFwsBESMgWQ0CERAAEEAgMAAgIDAAAAAAAAAAEAkQIysXIw
YAMRMZDQQVESEgABAgUDBAMAAAAAAAAAAAABAAJQETKSMzBxcnCwsQMhURL/2gAMAwEAAhEDEQAA
APQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAADGG0VUyLSKLSKLSKLSKLSKLSKLSRXm8glaAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMbxrmMPHWAAAAAAG7Mazexgto4AAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADG8a5jDx1gAAAAABuzGs3sYLaOAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxvGuYw8dYAAAAAAbsxrN7GC2jgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMb5hEWVJKjLIjLIjLIjLI
jLIjLIk2cbsOITOYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH/9oACAECAAEFAPCaJfROagOagOagOagOagOagOag
Npu03MOdufDr+mp5258Ov6annbnw6/pqedxJ6w8LGcLGcLGcLGcLGcLGcLGbHaLtofKef//aAAgB
AwABBQDwmQtlLZS2UtlLZS2UtlHG/Yudp369z3ztO/Xue+dp369z3ztO9yhcoXKFyhcoXKFyg657
18p5/9oACAEBAAEFAPxwO/xqa6TXSa6TXSa6TXSa6TXSa6TXSa6TXSa6TXRl1xXtbu0sZivrd2lj
MV9bu0sZivrd2ljMV9bu0sZivrd2ljMV9bu0sZivrdU9SU0SmiU0SmiU0SmiU0SmiU0SmiU0SmiU
0SmhGm0X9Suf/9oACAECAgY/AOkxP0JrE65YnXLE65YnXLE65YnXLE65OcGln5MvkzjjuJ0PbzHi
OO4nQ9vMeI47idD28x4jhH2CFlbasrbVlbasrbVlbasrbVlbanNLg/8ARn8CXaev/9oACAEDAgY/
AOk9QVQVQVQVQVQVQQE5zjg30G7Rwb6Ddo4N9Bu0dpKpKpKpKpKpKpKBlKXaev/aAAgBAQEGPwD8
cEyPv/Jwrycq8nKvJyrycq8nKvJyrycq8nKvJyrycq8nKvJyrycq8nK8wZyIMh/J/vvE9Tjn89o5
7xPU45/PaOe8T1OOfz2jnvE9Tjn89o57xPU45/PaOe8T1OOfz2jnvE9Tjn89o57x8H6VIsFSLBUi
wVIsFSLBUiwVIsFSLBUiwVIsFSLBUiwVIsFSLBfIhEEfR+B+pXf/2Q==" transform="matrix(0.78 0 0 0.78 0 0)">
				</image>
			</g>
		</g>
	</g>
</g>
<g id="copyright__x0028_replace_TODO_x0027_s_x0029_">
</g>
<g>
	<polygon fill="#FFFFFF" points="761.366,38.36 709.508,72.914 761.544,105.485 813.424,71.561 	"/>
	<path d="M810.018,74.233c0.53,0.648,1.446,1.11,2.289,1.581c0.275,0.152,0.724,0.283,0.752,0.542
		c0.036,0.354-1.141,1.004-1.459,1.209c-12.383,8.056-24.555,15.803-36.961,23.837c-3.619,2.345-7.481,4.898-11.212,7.293
		c-0.471,0.301-1.297,0.919-1.708,0.919c-0.442-0.002-1.238-0.627-1.707-0.919c-16.235-10.05-32.364-19.952-48.673-29.961
		c-0.353-0.215-1.481-0.731-1.504-1.084c-0.025-0.466,1.015-0.777,1.377-0.961c0.582-0.287,1.123-0.456,1.5-0.831
		c-0.817-0.789-1.871-1.292-2.916-1.958c-0.284-0.18-0.811-0.441-0.835-0.751c-0.031-0.434,0.926-0.928,1.46-1.294
		c1.685-1.144,3.201-2.167,4.875-3.29c13.072-8.756,26.026-17.522,39.13-26.296c2.177-1.457,4.267-3.09,6.501-4.378
		c0.527-0.3,1,0.119,1.46,0.419c4.042,2.661,8.196,5.344,12.21,7.918c8.772,5.636,17.337,11.212,26.168,16.834
		c3.727,2.374,7.661,4.912,11.461,7.378c0.359,0.233,1.523,0.901,1.544,1.126c0.025,0.342-0.604,0.572-0.877,0.749
		C811.971,72.91,810.824,73.614,810.018,74.233 M761.719,39.141c-0.114-0.217-0.349-0.315-0.543-0.456
		c-0.36,0.069-0.395,0.466-0.831,0.456c-0.331,0.24-0.573,0.571-1.044,0.669c-0.18,0.333-0.621,0.402-0.83,0.709h-0.211
		c-0.344,0.364-0.802,0.613-1.166,0.957c-0.451-0.005-0.451,0.439-0.917,0.418c-0.259,0.322-0.57,0.596-1.043,0.708
		c-0.314,0.269-0.541,0.624-1.042,0.707c-0.295,0.275-0.586,0.554-1.04,0.667c-0.147,0.371-0.639,0.39-0.834,0.71h-0.208
		c-0.425,0.353-0.76,0.797-1.376,0.961c-0.226,0.284-0.592,0.432-0.832,0.705h-0.249c-0.279,0.281-0.531,0.581-1.002,0.668
		c-0.199,0.313-0.579,0.448-0.835,0.71c-0.5,0.079-0.689,0.475-1.04,0.706h-0.25c-0.278,0.28-0.53,0.583-1.002,0.666
		c-0.116,0.104-0.131,0.313-0.375,0.293c0.023,0.217-0.114,0.272-0.332,0.249c-0.156,0.22-0.365,0.385-0.709,0.418
		c-0.063,0.075-0.072,0.206-0.124,0.294c-0.42,0.022-0.466,0.423-0.918,0.414c0.022,0.218-0.116,0.272-0.333,0.249
		c-0.221,0.167-0.338,0.441-0.709,0.462c-0.1,0.256-0.349,0.373-0.666,0.417c-0.052,0.153-0.12,0.294-0.375,0.248
		c-0.063,0.075-0.072,0.205-0.126,0.288c-0.299-0.046-0.239,0.261-0.583,0.169c0.021,0.218-0.113,0.274-0.333,0.248
		c-0.053,0.31-0.379,0.348-0.668,0.418c-0.046,0.177-0.095,0.349-0.374,0.293c-0.167,0.544-0.935,0.483-1.168,0.957
		c-0.163-0.038-0.104,0.149-0.288,0.083c-0.008,0.066,0.013,0.157-0.044,0.171h-0.251c-0.311,0.241-0.508,0.601-1,0.666
		c-0.214,0.228-0.429,0.457-0.79,0.542c-0.01,0.058,0.014,0.15-0.043,0.164c-0.505,0.008-0.539,0.489-1.002,0.542
		c-0.005,0.064,0.019,0.156-0.039,0.169c-0.328-0.079-0.254,0.244-0.584,0.166c-0.101,0.287-0.327,0.452-0.667,0.501
		c-0.428,0.36-0.798,0.783-1.416,0.958c-0.176,0.214-0.313,0.466-0.708,0.46c-0.272,0.279-0.554,0.555-0.999,0.665
		c-0.493,0.608-1.362,0.835-1.877,1.418h-0.249c-0.238,0.26-0.545,0.488-0.791,0.667c-0.173,0.121-0.396,0.169-0.585,0.289
		c-0.319,0.207-0.589,0.684-1.042,0.667v0.209c-0.461,0.056-0.548,0.48-1.041,0.501c-0.301,0.284-0.552,0.615-1.043,0.707
		c-0.281,0.293-0.551,0.588-1.042,0.666c-0.234,0.349-0.57,0.597-1.041,0.71c-0.293,0.289-0.588,0.581-1.042,0.708
		c-0.18,0.348-0.662,0.395-0.833,0.749c-0.05-0.046-0.075-0.117-0.209-0.08c0.032,0.239-0.095,0.317-0.332,0.29
		c-0.054,0.321-0.428,0.325-0.709,0.417c0.021,0.216-0.116,0.272-0.334,0.248c-0.037,0.354-0.41,0.37-0.71,0.46
		c0.023,0.218-0.116,0.271-0.331,0.248c-0.06,0.318-0.41,0.345-0.711,0.418c0.032,0.239-0.097,0.322-0.332,0.292
		c-0.099,0.274-0.395,0.353-0.708,0.418c0.022,0.217-0.116,0.271-0.335,0.249c-0.169,0.222-0.342,0.436-0.706,0.457
		c0.021,0.218-0.115,0.273-0.335,0.253c-0.04,0.082-0.08,0.165-0.124,0.25c-0.771,0.254-1.1,0.956-1.958,1.123
		c0.056,0.263-0.174,0.229-0.293,0.292c-0.082,0.042-0.153,0.224-0.208,0.25c-0.056,0.026-0.154-0.03-0.208,0
		c-0.039,0.02-0.063,0.148-0.125,0.166c-0.278,0.087-0.509,0.231-0.502,0.418c0.019,0.423,0.822,0.384,0.961,0.789
		c0.212-0.015,0.384,0.008,0.375,0.208c0.219-0.02,0.355,0.035,0.333,0.253c0.327-0.065,0.286,0.244,0.624,0.166v0.17
		c0.196,0.025,0.357,0.084,0.417,0.248c0.472,0.11,0.751,0.417,1.043,0.705c0.188-0.021,0.312,0.026,0.332,0.17
		c0.389,0,0.568,0.209,0.709,0.458c0.373-0.053,0.363,0.273,0.708,0.249v0.168c0.287,0.071,0.614,0.108,0.669,0.419h0.289
		c-0.06,0.155,0.116,0.078,0.085,0.208c0.352,0.007,0.475,0.244,0.667,0.411h0.292c0.316,0.354,0.752,0.583,1.125,0.878
		c0.34,0.018,0.516,0.208,0.668,0.417h0.289c0.312,0.521,1.107,0.562,1.459,1.043c0.374,0.015,0.488,0.29,0.709,0.456
		c0.34,0.021,0.516,0.206,0.665,0.417c0.354-0.09,0.292,0.237,0.626,0.168c-0.062,0.186,0.121,0.126,0.083,0.294
		c0.456,0.082,0.747,0.333,1.043,0.581c0.217-0.022,0.356,0.032,0.332,0.247c0.213-0.015,0.385,0.008,0.377,0.21
		c0.691,0.131,0.978,0.662,1.623,0.836c0.006,0.301,0.517,0.092,0.459,0.457c0.328-0.078,0.255,0.245,0.585,0.166
		c-0.059,0.17,0.089,0.131,0.083,0.253c0.331,0,0.523,0.139,0.624,0.372c0.738,0.179,1.097,0.738,1.833,0.916
		c0.094,0.338,0.558,0.306,0.668,0.626h0.292c-0.053,0.137,0.132,0.032,0.08,0.17c0.488-0.021,0.459,0.482,0.96,0.454
		c0.225,0.432,0.861,0.447,1.127,0.834h0.25c-0.061,0.159,0.113,0.081,0.082,0.21c0.364,0.011,0.508,0.239,0.709,0.416
		c0.464,0.116,0.763,0.404,1.042,0.709c0.188-0.023,0.311,0.022,0.332,0.166c0.801,0.215,1.183,0.847,2.002,1.041
		c-0.058,0.169,0.089,0.135,0.085,0.253c0.187-0.024,0.308,0.021,0.33,0.165c0.417-0.028,0.487,0.291,0.709,0.457
		c0.354,0.023,0.516,0.237,0.707,0.418c0.221-0.021,0.358,0.033,0.335,0.253h0.248c-0.005,0.115,0.128,0.094,0.128,0.204
		c0.34,0.021,0.516,0.207,0.666,0.419c0.342,0.03,0.539,0.209,0.709,0.416c0.199-0.02,0.337,0.023,0.332,0.208
		c0.366,0.008,0.509,0.242,0.709,0.417c0.352,0.008,0.479,0.247,0.668,0.418h0.291c0.434,0.622,1.376,0.733,1.832,1.333
		c0.342,0.021,0.516,0.206,0.667,0.416c0.19-0.022,0.313,0.022,0.335,0.166c0.396-0.006,0.532,0.245,0.71,0.458
		c0.641,0.19,1.041,0.627,1.665,0.834c0.411,0.646,1.411,0.701,1.833,1.335c0.43,0.096,0.779,0.274,1.001,0.581h0.291
		c0.089,0.203,0.355,0.229,0.417,0.46c0.327-0.067,0.283,0.241,0.627,0.167c0.451,0.573,1.296,0.758,1.791,1.292
		c0.354-0.092,0.29,0.236,0.625,0.165c-0.062,0.188,0.119,0.131,0.08,0.294c0.596,0.086,0.809,0.557,1.377,0.667v0.165
		c0.211-0.016,0.385,0.004,0.374,0.207c0.467-0.006,0.459,0.462,0.958,0.417c0.063,0.217,0.378,0.18,0.418,0.419h0.252
		c0.048,0.148,0.143,0.243,0.373,0.209c-0.056,0.165,0.092,0.131,0.083,0.248c0.33-0.079,0.256,0.244,0.585,0.168
		c-0.058,0.168,0.089,0.13,0.081,0.251c0.644,0.064,0.803,0.613,1.417,0.705v0.17h0.586c0.007-0.066-0.017-0.157,0.04-0.17
		c0.466-0.048,0.509-0.516,0.999-0.54c0.009-0.062-0.012-0.153,0.042-0.165c0.666-0.226,1.02-0.765,1.71-0.961
		c0.275-0.293,0.585-0.555,1.041-0.666c0.128-0.264,0.346-0.431,0.708-0.457c0.264-0.444,0.918-0.503,1.167-0.961
		c0.44-0.006,0.442-0.448,0.918-0.418c0.021-0.143,0.144-0.189,0.334-0.163c0.258-0.327,0.563-0.604,1.041-0.712
		c0.293-0.287,0.573-0.593,1.042-0.705c0.149-0.229,0.356-0.393,0.709-0.419c0.336-0.244,0.534-0.631,1.042-0.709
		c0.303-0.375,0.807-0.551,1.165-0.874h0.21c0.052-0.32,0.41-0.338,0.709-0.415c-0.025-0.219,0.115-0.275,0.332-0.249
		c-0.023-0.22,0.117-0.274,0.334-0.253c0.177-0.211,0.355-0.422,0.708-0.459c0.144-0.217,0.339-0.381,0.667-0.415
		c0.299-0.297,0.56-0.632,1.081-0.709c0.154-0.211,0.327-0.399,0.669-0.417c0.453-0.478,1.058-0.805,1.667-1.125
		c0.102-0.314,0.451-0.381,0.79-0.459c0.102-0.26,0.35-0.374,0.667-0.416c0.004-0.215,0.129-0.313,0.376-0.293
		c0.104-0.258,0.349-0.373,0.668-0.415c-0.027-0.232,0.155-0.262,0.375-0.251c0.092-0.21,0.37-0.24,0.458-0.459
		c0.452-0.116,0.729-0.409,1.041-0.665c0.444-0.014,0.433-0.484,0.917-0.458c0.311-0.261,0.567-0.575,1.043-0.666
		c0.162-0.229,0.338-0.439,0.707-0.462c0.042-0.081,0.081-0.165,0.125-0.249c0.521-0.104,0.694-0.553,1.251-0.624
		c0.007-0.063-0.018-0.154,0.042-0.168c0.423-0.079,0.52-0.479,1.001-0.497c0.006-0.064-0.017-0.156,0.04-0.169
		c0.253-0.065,0.315-0.323,0.666-0.292c0.268-0.289,0.563-0.548,1-0.667c0.291-0.309,0.591-0.604,1.084-0.707
		c0.19-0.17,0.316-0.407,0.668-0.418c0.105-0.281,0.368-0.407,0.706-0.457c-0.029-0.364,0.484-0.185,0.502-0.499
		c0.512-0.115,0.682-0.57,1.248-0.624c0.008-0.066-0.013-0.157,0.045-0.171c0.438-0.072,0.496-0.529,1-0.543
		c0.006-0.06-0.019-0.149,0.04-0.165c0.458-0.181,0.724-0.553,1.248-0.663c0.283-0.456,0.94-0.536,1.252-0.961h0.21
		c0.17-0.222,0.339-0.438,0.708-0.458c0.154-0.221,0.344-0.404,0.707-0.417c-0.036-0.231,0.116-0.274,0.336-0.248
		c0.139-0.248,0.352-0.427,0.706-0.462c-0.021-0.216,0.116-0.27,0.333-0.249c-0.007-0.104,0.094-0.1,0.127-0.165
		c0.444-0.138,0.712-0.457,1.042-0.713c0.439-0.139,0.763-0.4,1.04-0.705c0.451,0.007,0.472-0.42,0.917-0.418
		c0.33-0.254,0.566-0.601,1.043-0.707c0.192-0.181,0.334-0.416,0.707-0.416c0.045-0.085,0.081-0.17,0.125-0.25
		c0.229,0.048,0.255-0.109,0.333-0.21c0.24,0.028,0.312-0.104,0.376-0.25c0.296,0.059,0.222-0.25,0.541-0.166
		c-0.029-0.366,0.484-0.185,0.5-0.503c0.529-0.149,0.777-0.581,1.334-0.705c0.007-0.065-0.012-0.157,0.044-0.168
		c0.47-0.041,0.516-0.509,0.998-0.542c0.007-0.062-0.014-0.152,0.041-0.166c0.584-0.097,0.79-0.572,1.335-0.705
		c-0.059-0.172,0.09-0.137,0.085-0.253c-0.477-0.413-1.122-0.654-1.543-1.125c-0.364-0.026-0.584-0.196-0.709-0.458
		c-0.482,0.024-0.488-0.431-0.958-0.418c0.007-0.117-0.143-0.081-0.085-0.249c-0.498,0.026-0.468-0.476-0.958-0.458
		c0.053-0.135-0.134-0.031-0.084-0.168h-0.248c-0.378-0.496-1.111-0.641-1.5-1.124h-0.251c0.051-0.137-0.135-0.032-0.083-0.17
		c-0.218,0.026-0.356-0.032-0.335-0.247c-0.234,0.01-0.408-0.039-0.373-0.29c-0.156-0.049-0.118,0.101-0.252,0.08
		c0.023-0.216-0.114-0.272-0.333-0.248c0.057-0.17-0.09-0.131-0.081-0.251c-1.114-0.274-1.607-1.174-2.711-1.459
		c0.041-0.165-0.144-0.106-0.084-0.292c-0.462,0.007-0.47-0.447-0.959-0.415c-0.164-0.366-0.676-0.379-0.873-0.708h-0.25v-0.167
		c-0.484-0.047-0.627-0.43-1.042-0.542v-0.168c-0.3-0.075-0.637-0.113-0.708-0.416c-0.88-0.188-1.205-0.938-2.085-1.124v-0.168
		c-0.528,0-0.537-0.521-1.041-0.543v-0.164c-0.287-0.075-0.614-0.109-0.669-0.416c-0.202,0.008-0.359-0.028-0.375-0.21
		c-0.349-0.01-0.473-0.248-0.666-0.418c-0.5-0.013-0.572-0.455-1.041-0.498c0.014-0.096-0.006-0.161-0.042-0.209
		c-0.328-0.035-0.523-0.201-0.666-0.416c-0.218,0.019-0.357-0.033-0.333-0.251c-0.137-0.023-0.211,0.014-0.25,0.083
		c-0.098-0.236-0.471-0.196-0.457-0.542c-0.192,0.023-0.312-0.021-0.336-0.166c-0.374,0-0.515-0.236-0.706-0.418
		c-0.481,0.006-0.451-0.495-0.961-0.457c0.008-0.117-0.142-0.083-0.082-0.253c-0.475,0.019-0.478-0.439-0.958-0.413
		c-0.017-0.111-0.135-0.118-0.085-0.294c-0.127-0.033-0.051,0.143-0.208,0.085c-0.455-0.432-1.168-0.61-1.542-1.128h-0.249
		c-0.062-0.218-0.375-0.179-0.417-0.414c-0.215,0.007-0.392-0.026-0.375-0.253c-0.328,0.078-0.254-0.243-0.584-0.165
		c-0.214-0.453-0.875-0.458-1.124-0.876c-0.331,0.105-0.381-0.234-0.46-0.292c-0.18-0.135-0.505-0.151-0.583-0.413
		c-0.298-0.078-0.654-0.099-0.706-0.417c-0.484,0.008-0.471-0.48-0.961-0.462c-0.039-0.238-0.356-0.199-0.418-0.415
		c-0.369-0.019-0.545-0.235-0.708-0.456c-0.479-0.093-0.761-0.38-1.042-0.671c-0.187,0.022-0.308-0.022-0.334-0.166
		c-0.241,0.026-0.371-0.072-0.374-0.292c-0.34-0.022-0.516-0.204-0.667-0.418c-0.349-0.04-0.601-0.175-0.707-0.457
		c-0.479,0.021-0.471-0.444-0.957-0.414c-0.167-0.341-0.64-0.366-0.794-0.71h-0.248c0.051-0.135-0.138-0.029-0.084-0.165
		c-0.461,0-0.467-0.451-0.959-0.419c0.057-0.17-0.092-0.13-0.083-0.252h-0.252c-0.242-0.422-0.856-0.473-1.123-0.871
		c-0.545,0.018-0.554-0.504-1.043-0.545v-0.167c-0.297-0.076-0.656-0.094-0.708-0.416h-0.249c-0.301-0.368-0.868-0.466-1.126-0.876
		c-0.347,0.014-0.444-0.222-0.709-0.289v-0.17c-0.34,0.092-0.256-0.241-0.584-0.163c-0.089-0.287-0.403-0.347-0.707-0.418
		c0.057-0.17-0.092-0.131-0.085-0.249c-0.244,0.021-0.372-0.076-0.374-0.293c-0.574-0.177-1.049-0.451-1.375-0.877
		c-0.373,0.055-0.363-0.273-0.71-0.249v-0.166c-0.34-0.021-0.512-0.209-0.666-0.415c-0.205,0.008-0.363-0.026-0.376-0.21
		C762.425,39.742,762.343,39.171,761.719,39.141"/>
	<path fill-rule="evenodd" clip-rule="evenodd" fill="#231F20" d="M769.263,47.312c0.22,0.862,0.832,1.332,0.959,2.29
		c0.197,0.08,0.129,0.427,0.332,0.501c0.05,0.488,0.196,0.888,0.375,1.251c0.031,0.832,0.515,1.21,0.418,2.164
		c0.121,0.104,0.124,0.32,0.249,0.418v0.457c-0.038,0.138,0.108,0.088,0.125,0.17c-0.009,0.496,0.267,0.948,0.332,1.457
		c0.024,0.189-0.05,0.432,0,0.627c0.024,0.085,0.148,0.162,0.168,0.249c0.066,0.301,0.038,0.725,0.082,1.083
		c0.014,0.101,0.018,0.589,0.083,0.835c0.025,0.094,0.147,0.153,0.169,0.251c0.08,0.422,0.034,0.892,0.08,1.331
		c-0.036,0.136,0.11,0.088,0.128,0.166c0.036,0.479-0.113,1.141,0.165,1.376c0.043,0.281-0.106,0.366-0.165,0.544
		c-0.387,0.077-0.353-0.263-0.542-0.374c-0.047-0.135,0.066-0.104,0.082-0.17c-0.125-0.749-0.148-1.6-0.167-2.455
		c-0.405-0.209-0.129-1.099-0.33-1.504c-0.036-0.13,0.034-0.155,0.08-0.208c-0.257-0.777-0.285-1.365-0.416-2.168
		c-0.415-0.378-0.133-1.449-0.541-1.832c-0.048-0.131,0.064-0.103,0.083-0.168c-0.064-0.032-0.063-0.131-0.166-0.124
		c-0.004-0.426,0.017-0.88-0.25-1.041c-0.089-0.357,0.016-0.905-0.294-1.043c0.021-0.198-0.004-0.355-0.166-0.376
		c-0.052-0.134,0.137-0.031,0.086-0.166c-0.246-0.768-0.629-1.402-0.837-2.209c-0.074-0.034-0.115-0.108-0.207-0.124
		c0.023-0.576-0.401-0.711-0.499-1.169c-0.18-0.085-0.363-0.166-0.334-0.457c-0.153-0.014-0.193-0.14-0.375-0.124
		c-0.057-0.478-1.036-0.364-1.418-0.251c-0.056,0.014-0.032,0.104-0.042,0.165c-0.524,0.033-0.741,0.371-0.79,0.877
		c-0.169-0.057-0.132,0.09-0.252,0.082c0.053,0.248-0.097,0.291-0.205,0.374c0.017,0.563-0.334,0.754-0.336,1.294
		c-0.133-0.053-0.032,0.135-0.166,0.084c-0.111,0.639-0.219,1.278-0.459,1.793c-0.015,0.651-0.138,1.193-0.331,1.663
		c0.292,0.354-0.318,0.846,0,1.212c-0.315,0.684-0.123,1.876-0.419,2.584c-0.05,0.175,0.066,0.183,0.085,0.291
		c-0.301,0.484-0.107,1.124-0.168,1.749c-0.097,1.004-0.1,2.149-0.084,3.212c-0.2-0.006-0.114,0.27-0.208,0.372
		c0.23,0.241-0.172,0.634,0.084,0.876c-0.073,0.294-0.117,0.556,0,0.834c-0.178,0.251-0.06,0.767,0.124,0.918v2.292
		c0.064,0.147-0.14,0.024-0.124,0.124c0.48,0.521-0.029,2.031,0.376,2.626c-0.174,0.359-0.092,1.092,0.082,1.374
		c0.05,0.492-0.103,1.187,0.085,1.542c-0.02,0.492,0.004,0.941,0.167,1.249c-0.315,0.486,0.324,1.056,0,1.542
		c0.224,0.581,0.237,1.374,0.331,2.087c0.009,0.058,0.125,0.014,0.125,0.082c0.041,0.972,0.181,1.845,0.419,2.623
		c-0.026,0.637,0.061,1.162,0.249,1.587c0.034,0.13-0.037,0.156-0.084,0.207c0.121,0.391,0.325,1.049,0.252,1.374
		c0.096,0.054,0.039,0.266,0.205,0.25c0.251,1.363,0.655,2.572,0.919,3.917c0.15,0.086,0.136,0.338,0.292,0.416
		c0.055,0.961,0.547,1.484,0.666,2.376c0.088,0.036,0.069,0.179,0.209,0.167c-0.318,0.066,0.241,0.516,0.332,0.667
		c0.029,0.05-0.014,0.152,0,0.209c0.037,0.137,0.25,0.169,0.166,0.457c0.015,0.059,0.105,0.035,0.168,0.044
		c0.055,0.264,0.088,0.552,0.375,0.584c0.033,0.535,0.544,0.594,0.753,0.956h0.288c0.236,0.52,0.906,0.173,1.334,0.083
		c0.985-0.736,1.513-1.933,2.041-3.123c0.054-0.138-0.132-0.033-0.083-0.171c0.246-0.263,0.426-0.6,0.417-1.123
		c0.128,0.032,0.052-0.144,0.21-0.082c-0.012-0.359,0.143-0.552,0.083-0.957c0.484-0.28,0.13-1.399,0.584-1.711
		c0.017-0.176-0.061-0.446,0.081-0.501c0.063-0.157-0.114-0.078-0.081-0.204c0.222-0.405-0.09-1.339,0.375-1.503
		c0.095-0.489-0.028-0.941,0.25-1.376c0.006-0.118-0.143-0.08-0.084-0.251c0.322-0.529,0.129-1.078,0.249-1.831
		c0.034-0.221,0.214-0.416,0.249-0.626c0.076-0.431-0.069-0.82,0.166-1.083c0.056-0.163-0.135-0.085-0.081-0.249
		c0.244-0.717-0.07-1.522-0.167-2.169c0.023-0.44-0.005-0.83-0.167-1.083c-0.055-0.17,0.09-0.132,0.083-0.249
		c-0.255-0.489-0.169-1.471-0.169-2.502c0.004-0.754-0.075-1.551,0-2.042c0.045-0.258,0.321-0.497,0.086-0.708
		c0.06-0.255,0.124-0.32,0-0.582c0.233-0.287,0.127-0.693,0.167-1.045c0.029-0.246,0.199-0.501,0.248-0.791
		c0.057-0.324,0.008-0.64,0.086-0.914c0.077-0.277,0.263-0.505,0.209-0.796c0.081,0,0.053-0.112,0.165-0.08
		c0.034-0.605,0.332-0.951,0.334-1.585c0.29-0.349,0.385-0.896,0.625-1.291c0.092-0.771,0.511-1.213,0.749-1.834
		c0.158,0.062,0.083-0.113,0.21-0.085c0.087-0.479,0.555-0.581,0.666-1.038c0.365-0.096,0.536-0.383,0.792-0.585
		c0.206-0.002,0.394-0.026,0.417-0.208h0.791v0.127c0.632,0.079,0.822,0.594,1.209,0.913c0.074,0.314,0.162,0.615,0.458,0.711
		c0.285,0.813,0.767,1.427,0.918,2.373c0.007,0.048,0.076,0.035,0.125,0.044c0.324,1.716,0.78,3.301,0.916,5.208
		c0.14,0.097,0.082,0.389,0.208,0.502c0.049,0.13-0.064,0.1-0.082,0.165c0.084,0.14,0.279,0.506,0.082,0.666
		c0.129,0.078,0.023,0.392,0.166,0.457c-0.087,1.717,0.193,3.971-0.166,5.336c0.106,0.449,0.109,1.001,0.335,1.334h0.249
		c0.06,0.065,0.087,0.163,0.124,0.249c0.19-0.024,0.312,0.023,0.333,0.167c0.458-0.013,0.638,0.253,1.168,0.167
		c0.231-0.157,0.523-0.256,0.833-0.334c0.134-0.617,0.632-0.869,0.583-1.666c0.127,0.03,0.052-0.144,0.211-0.084
		c0.05-0.435,0.093-0.877,0.166-1.292c0.134,0.05,0.029-0.136,0.166-0.083c0.223-1.372,0.732-2.461,1.042-3.749
		c0.273-0.24,0.418-0.61,0.876-0.669c0.067-0.055,0.143-0.106,0.163-0.206c0.345-0.091,0.764-0.101,1.211-0.084
		c-0.002,0.157,0.199,0.104,0.167,0.29c0.571-0.055,0.458,0.568,0.876,0.669c-0.003,0.485,0.18,0.787,0.416,1.04
		c-0.065,0.453,0.227,0.549,0.25,0.915c0.074,0.038,0.115,0.11,0.21,0.126c-0.039,0.344,0.223,0.388,0.246,0.669
		c0.232,0.236,0.983,0.662,1.126,0.041c0.469-0.118,0.509-0.66,0.957-0.791c-0.06-0.339,0.397-0.163,0.335-0.503
		c0.18-0.099,0.489-0.065,0.458-0.372c0.267-0.083,0.694-0.002,0.709-0.334c0.36-0.019,0.667-0.086,0.876-0.251
		c0.512,0.063,1.031-0.089,1.459,0c0.265,0.054,0.405,0.316,0.666,0.251c-0.005,0.145,0.172,0.104,0.167,0.249
		c0.211-0.104,0.179,0.117,0.291,0.168c0.263,0.119,0.514,0.368,0.667,0.667c0.078,0.149,0.4,0.268,0.541,0.458
		c0.066,0.088,0.041,0.196,0.083,0.249c0.106,0.126,0.219,0.115,0.335,0.209c0.066,0.053,0.084,0.195,0.168,0.247
		c0.047,0.034,0.152-0.025,0.205,0c0.083,0.041,0.09,0.269,0.336,0.171c0.207,0.261,0.604,0.335,1.041,0.372
		c-0.015,0.072,0.032,0.081,0.084,0.085c0.04,0.182-0.047,0.229-0.084,0.333h-0.96c-0.091-0.383-0.738-0.206-0.79-0.625h-0.251
		c-0.108-0.336-0.457-0.43-0.666-0.669c0.027-0.249-0.067-0.376-0.292-0.372c0.063-0.188-0.123-0.131-0.085-0.293
		c-0.145-0.089-0.342-0.125-0.332-0.375c-0.542-0.303-1.32-0.809-2.46-0.625c-0.22,0.035-0.489,0.189-0.707,0.248h-0.375
		c-0.057,0.014-0.034,0.104-0.042,0.17c-0.579,0.196-0.893,0.664-1.377,0.959v0.288c-0.338-0.006-0.233,0.432-0.582,0.42
		c-0.04,0.275-0.392,0.245-0.459,0.498c-1.258,0.22-1.306-0.775-1.832-1.29c-0.05-0.313-0.102-0.621-0.418-0.668
		c-0.035-0.281,0.024-0.664-0.248-0.71c0.021-0.189-0.022-0.307-0.167-0.333v-0.291c-0.322-0.445-0.624-0.904-1.542-0.751
		c-0.226,0.138-0.405,0.317-0.667,0.42c-0.045,0.314-0.167,0.552-0.332,0.748c-0.062,0.159,0.112,0.08,0.081,0.207
		c-0.199,0.313-0.348,0.681-0.417,1.126h-0.126v0.624c-0.133-0.05-0.031,0.139-0.166,0.086c-0.044,0.607-0.292,1.013-0.333,1.624
		c-0.135-0.05-0.031,0.135-0.166,0.085v0.456c-0.417,0.75-0.439,1.894-1.167,2.336c-0.044,0.081-0.083,0.163-0.125,0.248
		c-0.509,0.02-0.865,0.189-1.334,0.25c-0.152-0.025-0.465-0.296-0.583-0.082c-0.456-0.104-0.782-0.675-1.333-0.416
		c0.016,0.406-0.271,0.687-0.333,1.084c-0.04,0.269,0.06,0.543,0,0.83c-0.03,0.152-0.208,0.261-0.25,0.418
		c-0.116,0.448-0.035,0.979-0.167,1.417c-0.4,0.338-0.321,1.151-0.706,1.502c-0.063,0.158,0.113,0.08,0.08,0.208
		c-0.118-0.009-0.079,0.139-0.251,0.083c0.052,0.285-0.143,0.326-0.082,0.625c-0.167-0.06-0.132,0.091-0.25,0.083v0.25
		c-0.193,0.14-0.28,0.386-0.374,0.627c-0.125,0.096-0.372,0.074-0.332,0.331c-0.591,0.009-1.227,0.618-1.752,0.165
		c-0.155-0.043-0.119,0.104-0.251,0.086c0.051-0.134-0.133-0.034-0.084-0.169c-0.479,0.051-0.624-0.237-0.957-0.331
		c-0.177-0.463-0.566-0.714-0.667-1.251c-0.539-0.504-0.766-1.317-1.041-2.084c-0.067-0.059-0.164-0.085-0.207-0.165
		c-0.049-0.131,0.063-0.103,0.081-0.169c-0.32-0.332-0.343-0.959-0.583-1.373c-0.812,0.315-0.24,2.007-0.71,2.667
		c-0.049,0.177,0.07,0.184,0.085,0.29c-0.43,0.376-0.081,1.532-0.499,1.918v0.498c-0.152,0.34-0.249,1.069-0.208,1.337
		c-0.231,0.366-0.265,0.931-0.336,1.455c-0.29,0.14-0.248,0.614-0.248,1.044c-0.234,0.114-0.258,0.44-0.252,0.79h-0.125
		c-0.08,0.492-0.412,0.728-0.415,1.293c-0.243,0.272-0.384,0.645-0.5,1.044c-0.158-0.063-0.08,0.11-0.208,0.08
		c0.001,0.515-0.455,0.578-0.502,1.042c-0.469,0.187-0.517,0.791-0.999,0.961c-0.818,0.099-1.451,0.362-2.043,0
		c-0.044-0.026-0.104-0.223-0.166-0.253c-0.039-0.017-0.134,0.028-0.166,0c-0.145-0.119-0.125-0.465-0.459-0.458
		c0.035-0.256-0.207-0.235-0.334-0.334c0.069-0.262-0.126-0.262-0.249-0.331c-0.095-0.436-0.247-0.813-0.542-1.043
		c0.001-0.376-0.201-0.547-0.416-0.708c-0.001-0.524-0.303-0.753-0.337-1.25c-0.069-0.039-0.065-0.156-0.206-0.125
		c0.04-0.292-0.146-0.354-0.082-0.665c-0.358-0.354-0.291-1.131-0.667-1.461c-0.044-0.154,0.103-0.12,0.081-0.25
		c-0.412-1.013-0.68-2.18-1.041-3.249c0.14-0.782-0.568-1.832-0.456-2.585c-0.234-0.419-0.161-1.146-0.419-1.542
		c0.073-0.311-0.109-1.011-0.249-1.376c0.262-0.284-0.268-0.669,0-0.956c-0.292-0.704-0.246-1.51-0.459-2.127
		c0.102-0.904-0.392-2.348-0.249-3.29c-0.205-0.463-0.082-1.253-0.252-1.749c-0.059-0.748,0.149-1.764-0.167-2.254
		c0.057-0.287,0.104-0.395,0-0.667c-0.023-0.091,0.082-0.058,0.084-0.123c0.009-0.659,0.026-1.434-0.084-1.917
		c-0.095-0.422,0.144-0.719-0.04-1.085h-0.166c-0.077,0.523-0.24,1.258-0.169,1.669c-0.38,0.727-0.072,2.147-0.417,2.918
		c-0.059,0.156,0.116,0.078,0.084,0.204c-0.137-0.05-0.032,0.137-0.164,0.085v1.375c0.035,0.135-0.141,0.056-0.128,0.167
		c0.138,0.459-0.149,0.869-0.167,1.252c-0.008,0.227,0.112,0.331,0.084,0.498c-0.017,0.111-0.14,0.195-0.166,0.334
		c-0.137,0.74,0.051,1.611-0.25,2.293c0.151,0.169,0.059,0.576,0.084,0.875c-0.436,0.562-0.058,1.942-0.416,2.581
		c-0.053,0.137,0.133,0.034,0.081,0.169c-0.238,0.249-0.153,0.817-0.166,1.289c0.037,0.136-0.138,0.058-0.125,0.171
		c0.064,0.562-0.117,0.879-0.084,1.415c-0.264,0.542-0.223,1.387-0.331,2.084c-0.352,0.62-0.177,1.77-0.627,2.293
		c-0.121,1.141-0.381,2.149-0.751,3.042c-0.061,0.157,0.116,0.079,0.084,0.206c-0.257,0.175-0.185,0.678-0.457,0.834
		c-0.016,0.086,0.009,0.131,0.082,0.124c-0.157,0.136-0.113,0.471-0.331,0.543c0.068,0.47-0.273,0.533-0.252,0.958
		c-0.144,0.023-0.191,0.146-0.166,0.336c-0.143,0.019-0.189,0.142-0.167,0.333c-0.171,0.019-0.233,0.151-0.208,0.374
		c-0.261,0.323-0.644,0.521-0.834,0.916c-0.374,0.166-0.685,0.399-1.084,0.542c-0.13,0.049-0.101-0.067-0.167-0.082
		c-0.44,0.386-0.955-0.045-1.461-0.085c0.011-0.117-0.137-0.08-0.078-0.248c-0.704-0.189-1-0.779-1.293-1.379
		c-0.015-0.055-0.105-0.032-0.168-0.038c0.084-0.32-0.226-0.246-0.166-0.543c-0.087-0.055-0.216-0.064-0.293-0.126
		c-0.032-0.677-0.391-1.023-0.5-1.625c-0.065-0.032-0.062-0.131-0.167-0.123c0.067-0.54-0.14-0.804-0.375-1.043
		c0.04-0.525-0.134-0.838-0.332-1.126c0.098-0.68-0.451-1.398-0.418-2.293c-0.004-0.047-0.075-0.032-0.124-0.039
		c-0.272-1.288-0.424-2.686-0.835-3.835c0.159-0.177,0.013-0.529-0.081-0.708c-0.313,0.211-0.554,0.499-0.54,1.041
		c-0.169-0.06-0.136,0.092-0.253,0.082v0.292c-0.184-0.059-0.11,0.137-0.124,0.254c-0.144-0.008-0.105,0.169-0.251,0.163
		c0.042,0.473-0.315,0.548-0.333,0.96c-0.411,0.268-0.51,0.852-1,1.042c-0.056,0.012-0.034,0.101-0.041,0.166
		c-0.272,0.092-0.56,0.163-0.752,0.334c-0.889-0.014-1.568-0.236-1.915-0.791c-0.567-0.335-0.645-1.163-1.168-1.542
		c-0.044-0.318-0.258-0.463-0.25-0.836c-0.134-0.17-0.215-0.396-0.25-0.666c-0.065-0.03-0.061-0.131-0.168-0.125
		c-0.084-0.385-0.074-0.868-0.373-1.043c-0.065-0.723-0.473-1.11-0.418-1.955c-0.063-0.034-0.061-0.135-0.165-0.126
		c0-0.651-0.244-1.063-0.25-1.71c-0.126-0.014-0.013-0.264-0.211-0.209c-0.021-1.019-0.129-1.948-0.08-3.042
		c-0.398,0.091-0.469,0.505-0.585,0.876c-0.213-0.088-0.037,0.213-0.208,0.167c-0.021,0.573-0.343,0.85-0.417,1.374
		c-0.416,0.46-0.562,1.188-1.039,1.585c-0.152,0.491-0.645,0.634-0.959,0.959c-0.14-0.014-0.259-0.009-0.294,0.082
		c-0.157-0.009-0.378,0.044-0.417-0.082h-0.417c-1.102-0.494-1.504-1.695-1.707-3.084c-0.006-0.05-0.078-0.036-0.127-0.043
		c-0.016-0.497-0.244-0.782-0.249-1.294c-0.309-0.313-0.317-0.93-0.417-1.456c-0.303-0.282-0.16-0.511-0.25-0.961
		c-0.075-0.032-0.115-0.104-0.211-0.124c0.018-0.555-0.393-0.686-0.581-1.039c-0.313,0.071-0.442-0.183-0.625,0
		c-0.17,0.058-0.131-0.091-0.249-0.084c-0.557,0.168-0.929,0.516-0.96,1.208c-0.768,0.732-0.865,2.133-1.708,2.792
		c-0.153-0.063-0.323-0.136-0.458,0c-0.466-0.159-0.945-0.303-1.125-0.749c-0.013-0.057-0.104-0.034-0.167-0.045
		c-0.129-0.564-0.723-0.661-0.959-1.123c-0.369-0.019-0.546-0.229-0.708-0.457c-0.181,0.014-0.337,0-0.332-0.17
		c-0.229,0.17-0.561-0.006-0.791-0.08c-0.475,0.096-0.665,0.089-1.127,0c-0.396-0.037-0.393,0.329-0.749,0.333
		c-0.045,0.233-0.363,0.19-0.375,0.459c-0.213-0.063-0.18,0.124-0.375,0.081c-0.28,0.585-0.688,1.036-1.126,1.458h-0.207
		c-0.04,0.075-0.156,0.067-0.126,0.21c-0.51,0.113-0.859,0.392-1.545,0.334c-0.069-0.097-0.106-0.227-0.079-0.417
		c0.304-0.043,0.575-0.117,0.874-0.17c0.152-0.331,0.769-0.202,0.833-0.62c0.193,0.052,0.14-0.143,0.333-0.085
		c0.108-0.477,0.58-0.589,0.709-1.041c0.298-0.147,0.607-0.282,0.709-0.625c0.317-0.033,0.671-0.023,0.706-0.334
		c0.688-0.131,1.646-0.112,2.336,0c0.095,0.21,0.368,0.243,0.666,0.249c0.143,0.218,0.364,0.358,0.626,0.457
		c0.016,0.234,0.145,0.354,0.415,0.333c-0.021,0.26,0.211,0.266,0.377,0.333c0.009,0.17,0.052,0.314,0.249,0.295
		c-0.096,0.457,0.43,0.294,0.418,0.667c0.647,0.241,1.159-0.104,1.249-0.667c0.184,0.059,0.111-0.142,0.249-0.125
		c-0.038-0.261,0.148-0.3,0.084-0.583c0.33-0.144,0.214-0.736,0.627-0.795c-0.033-0.712,0.473-0.887,0.665-1.375h0.208
		c0.058-0.01,0.034-0.102,0.041-0.165c0.542-0.104,1.212-0.117,1.752,0c-0.032,0.129,0.142,0.051,0.083,0.208
		c0.278,0.043,0.247,0.391,0.498,0.457c0.098,0.601,0.501,0.891,0.461,1.627c0.123,0.1,0.126,0.317,0.249,0.417
		c-0.035,0.673,0.33,0.948,0.332,1.581c0.198,0.21,0.111,0.7,0.378,0.837c-0.076,0.879,0.34,1.267,0.417,1.999
		c0.263,0.278,0.537,0.544,0.789,0.834c1.062,0.172,1.448-0.334,1.92-0.75c-0.066-0.215,0.125-0.18,0.082-0.377
		c0.57-0.608,0.752-1.611,1.29-2.249c-0.055-0.25,0.14-0.25,0.085-0.503c0.155,0.046,0.064-0.151,0.208-0.122
		c0.044-0.785,0.632-1.362,0.751-2.21c0.188-1.339-0.077-2.921,0.08-4.292c-0.02-0.087,0.063-0.078,0.128-0.083
		c0.108-0.668-0.211-1.768,0.582-1.75c-0.008,0.136,0.071,0.18,0.167,0.209c0.435,3.107,0.093,6.99,0.415,10.209
		c-0.034,0.135,0.11,0.086,0.125,0.169c0.03,0.196-0.066,0.51,0.086,0.581c0.057,1.013,0.09,2.046,0.416,2.792
		c-0.024,1.277,0.563,1.938,0.708,3.042c0.067,0.033,0.065,0.131,0.166,0.124c0.112,0.96,0.627,1.516,1.126,2.084
		c0.534,0.138,1.026,0.209,1.625,0.085c0.167-0.099,0.193-0.338,0.458-0.334c0.074-0.689,0.824-0.705,0.792-1.501
		c0.202-0.131,0.313-0.354,0.333-0.667c0.321-0.04,0.362-0.359,0.377-0.705c0.167,0.057,0.129-0.092,0.251-0.086
		c0.087-0.679,0.733-1.23,0.414-2.042c0.205-0.427-0.164-0.852-0.168-1.335c-0.064-0.031-0.061-0.131-0.165-0.124
		c-0.024-1.307-0.432-2.234-0.417-3.582c-0.01-0.063-0.125-0.018-0.125-0.083c0.053-0.379-0.227-0.712,0-0.999
		c0.21-0.059,0.221,0.085,0.375,0.08c0.143,0.99,0.247,1.978,0.5,2.957c0.035,0.135-0.035,0.161-0.081,0.211
		c0.321,0.513,0.099,1.568,0.624,1.875c0.216-0.103,0.249-0.392,0.251-0.707c0.209-0.099,0.241-0.372,0.247-0.667
		c0.426-0.258,0.336-1.027,0.71-1.334v-0.749c0.139,0.013,0.047-0.205,0.252-0.127c-0.027-0.429,0.205-0.598,0.164-1.041
		c0.491-0.175,0.222-1.112,0.627-1.374v-0.585c0.297-0.283,0.298-0.871,0.331-1.415c0.136,0.049,0.032-0.136,0.169-0.084
		c0.201-0.827,0.408-1.646,0.623-2.46c-0.05-0.219,0.038-0.395,0.168-0.832c0.062-0.213,0.142-0.425,0.25-0.542
		c-0.128-0.125-0.073-0.428-0.085-0.667c0.502-0.363-0.157-0.738-0.081-1.293c-0.169-0.138-0.249-0.363-0.252-0.666
		c-0.422-0.66-0.603-1.562-0.792-2.459c-0.065-0.03-0.061-0.133-0.165-0.127c-0.173-0.871-0.352-1.681-0.502-2.372
		c0.085-0.308,0.013-0.584-0.206-0.794c-0.027-0.195,0.069-0.513-0.085-0.582c0.277-0.315-0.25-0.693,0-0.959
		c-0.348-1.017-0.063-2.658-0.331-3.752c0.25-0.401,0.049-1.256,0.331-1.624c0.032-0.129-0.143-0.051-0.084-0.21
		c0.04-0.226,0.184-0.344,0.169-0.625c0.169-0.386,0.437-0.672,0.542-1.124c0.401-0.476,0.902-0.849,1.918-0.711
		c0.058,0.165,0.222,0.226,0.411,0.252c-0.037,0.349,0.276,0.341,0.253,0.666c0.069,0.058,0.089,0.162,0.251,0.127
		c-0.202,0.68,0.394,0.979,0.377,1.415c-0.008,0.145-0.1,0.479,0.081,0.754c-0.113,0.731-0.202,2.136,0.083,2.748
		c-0.03,0.282-0.076,0.2,0,0.458c-0.357,0.505-0.006,1.716-0.249,2.334c0.196,0.451-0.073,1.313-0.167,1.96
		c0.216,0.258-0.087,0.528,0,0.914c0.037,0.135-0.137,0.059-0.125,0.17c0.02,1.409-0.396,2.38-0.504,3.665
		c0.05,0.425,0.352,0.597,0.337,1.085c0.198,0.025,0.074,0.367,0.292,0.374v0.333c0.438,0.716,0.797,1.507,1.043,2.42
		c0.064,0.03,0.062,0.131,0.165,0.123c-0.001,0.485,0.182,0.79,0.417,1.041c0.051,0.138-0.134,0.033-0.083,0.169
		c0.323,0.27,0.134,1.057,0.541,1.249c-0.023,0.19,0.023,0.309,0.165,0.333c-0.047,0.409,0.219,0.507,0.169,0.916
		c0.377,0.318,0.188,1.2,0.582,1.502c0.007,0.101,0.01,0.201-0.082,0.208c0.089,0.037,0.073,0.176,0.208,0.163
		c-0.195,0.11,0.096,0.375,0.167,0.712c0.115,0.552,0.253,1.262,0.418,1.667c0.039,0.528,0.157,0.979,0.331,1.375v0.29
		c0.143,0.08,0.066,0.379,0.209,0.459c-0.149,0.161,0.021,0.467,0.082,0.666c0.038,0.131-0.033,0.157-0.082,0.209
		c0.262,0.746,0.19,1.652,0.418,2.253c-0.286,0.48,0.278,1.19,0,1.665c0.121,0.388-0.206,1.404,0.209,1.709
		c0.346-0.161,0.198-0.559,0.248-0.877c0.096-0.625,0.388-1.356,0.25-2.164c0.293-0.099,0.146-0.634,0.335-0.836
		c-0.06-0.403-0.088-0.873,0-1.498c0.022-0.162,0.141-0.299,0.166-0.461c0.04-0.251-0.032-0.523,0-0.748
		c0.009-0.071,0.103-0.103,0.125-0.168c0.127-0.411,0.007-1.169,0.084-1.751c0.062-0.483,0.333-0.745,0.082-1.042
		c0.337-0.761,0.113-1.694,0.249-2.539c0.025-0.143,0.146-0.26,0.168-0.377c0.177-0.922,0.019-1.893,0.167-2.835
		c0.059-0.095,0.081-0.224,0.166-0.29c0.051-0.175-0.066-0.184-0.085-0.292c0.446-0.666,0.11-2.111,0.462-2.874
		c0.056-0.171-0.092-0.131-0.085-0.254c0.243-0.885,0.293-1.98,0.5-2.83c0.073-0.296-0.053-0.666,0-0.999
		c0.033-0.216,0.111-0.381,0.168-0.71c0.08-0.473,0.111-0.899,0.083-1.374c-0.021-0.092,0.06-0.081,0.124-0.086
		c0.008-0.381-0.058-0.835,0.166-0.998c0.025-0.136-0.013-0.21-0.084-0.251c0.26-0.657,0.211-1.625,0.502-2.252
		c0.024-0.092-0.079-0.059-0.083-0.124c0.121-0.144,0.057-0.473,0.249-0.542c0.051-0.135-0.135-0.032-0.082-0.17
		c0.329-0.967,0.463-1.829,0.623-3.04c0.066-0.491,0.166-1.011,0.419-1.377c-0.069-0.521,0.176-0.738,0.167-1.206
		c0.15,0.041,0.017-0.204,0.209-0.126c0.018-0.605,0.253-0.993,0.498-1.371c0.052-0.138-0.136-0.035-0.085-0.17
		c0.16-0.245,0.298-0.506,0.334-0.875c0.162-0.157,0.362-0.275,0.376-0.584c0.363-0.136,0.393-0.606,0.791-0.711
		c0.046-0.091,0.2-0.073,0.168-0.247c0.452-0.075,0.958-0.151,1.375-0.083c0.248,0.038,0.639,0.239,0.835,0.416
		c0.027,0.027-0.025,0.137,0,0.167c0.027,0.036,0.41,0.257,0.457,0.336c0.056,0.091,0.019,0.274,0.082,0.33
		C769.095,47.229,769.178,47.268,769.263,47.312 M753.927,55.852c0.055-0.165-0.139-0.083-0.086-0.249
		c0.284-0.813-0.213-1.921,0.086-2.752c-0.13-0.535-0.142-1.188-0.168-1.832c-0.102-0.066-0.118-0.213-0.208-0.291
		c0.037-0.733-0.339-1.051-0.666-1.417c-0.224,0.056-0.273-0.059-0.377-0.125c-0.265,0.095-0.545,0.177-0.624,0.458
		c-0.33-0.053-0.208,0.347-0.501,0.333v0.375c-0.133-0.053-0.031,0.135-0.165,0.082c-0.312,1.207-0.111,2.921-0.166,4.378
		c0.142,0.241,0.251,0.749,0.084,1.04c0.257,0.26,0.073,0.955,0.331,1.213c-0.223,0.564,0.275,1.106,0.168,1.832
		c0.301,0.645,0.499,1.389,0.623,2.21c0.106-0.009,0.103,0.091,0.169,0.126c0.113,0.605,0.186,1.255,0.543,1.625
		c0.374-0.146,0.166-0.577,0.247-1.002c0.027-0.139,0.146-0.239,0.164-0.374c0.097-0.584,0-1.255,0.17-1.793
		c-0.085-0.25,0.01-0.174,0-0.501c0.13,0.032,0.052-0.143,0.208-0.084c-0.067-0.419,0.144-1.113-0.125-1.333
		C754.023,57.424,753.703,56.366,753.927,55.852 M782.806,66.355c-0.113-0.814-0.372-1.489-0.418-2.374
		c-0.104,0.009-0.101-0.091-0.166-0.126c0.05-0.408-0.215-0.505-0.168-0.917c-0.234,0-0.098-0.372-0.29-0.416
		c-0.188-1.089-0.631-1.925-1.293-2.543c-0.738-0.223-1.143,0.282-1.582,0.71c-0.116,0.113-0.067,0.243-0.168,0.418
		c-0.067,0.12-0.266,0.217-0.333,0.33c-0.057,0.104-0.033,0.264-0.085,0.378c-0.131,0.289-0.397,0.669-0.334,1.043h-0.123
		c0.039,0.466-0.33,0.527-0.251,1.041c-0.134-0.053-0.029,0.135-0.166,0.082c-0.085,0.889-0.438,1.507-0.501,2.417h-0.126
		c-0.059,0.159,0.188,0.01,0.126,0.166c-0.431,0.686-0.229,1.567-0.374,2.502c-0.021,0.156-0.145,0.298-0.167,0.457
		c-0.135,0.991-0.074,1.71-0.166,2.667c-0.083,0.848-0.084,1.899,0,2.75c0.056,0.561,0.065,1.103,0.252,1.584
		c0.06,0.159-0.116,0.081-0.086,0.209c0.096,0.119,0.096,0.214,0,0.336c0.339,0.549,0.105,1.672,0.541,2.123
		c0.038,1.003,0.29,1.797,0.588,2.541c-0.023,0.064-0.135,0.034-0.087,0.169c0.262,0.414,0.308,0.602,0.417,1.081
		c0.141-0.028,0.137,0.087,0.209,0.127c0.065,0.576,0.453,0.821,0.583,1.332c0.199,0.081,0.416,0.14,0.458,0.374
		c0.261-0.065,0.261,0.131,0.417,0.17c0.135,0.048,0.031-0.137,0.167-0.085c0.425,0.255,0.987-0.065,1.415-0.085
		c0.043-0.083,0.082-0.17,0.128-0.249h0.209c0.138-0.417,0.464-0.646,0.54-1.123c0.184,0.06,0.109-0.143,0.25-0.126
		c0.038-0.781,0.476-1.168,0.499-1.959c0.149,0.04,0.021-0.203,0.209-0.125c0.176-0.572-0.048-1.029,0.333-1.376
		c0.09-0.512-0.024-1.274,0.333-1.708c0.039-0.167-0.143-0.105-0.082-0.291c0.139-0.141,0.067-0.49,0.082-0.751
		c-0.72-1.587-0.303-4.658-0.415-6.586c-0.004-0.065-0.107-0.032-0.085-0.123c0.028-1.181-0.059-2.247-0.166-3.292
		c-0.049-0.008-0.117,0.006-0.124-0.043v-0.5c0.063-0.005,0.144,0.007,0.124-0.083C782.995,66.333,782.788,66.455,782.806,66.355
		 M753.381,67.439c-0.229-0.087-0.171-0.616-0.54-0.332c-0.255,0.626-0.296,1.908-0.666,2.416c0.168,0.516-0.212,1.052-0.373,1.501
		c-0.014,0.111,0.161,0.034,0.124,0.165h-0.124c-0.085,0.627-0.404,1.014-0.419,1.711c-0.133-0.052-0.031,0.136-0.165,0.084
		c-0.023,0.321-0.133,0.562-0.082,0.957c-0.171-0.056-0.133,0.09-0.253,0.083c-0.035,0.131,0.1,0.432-0.125,0.375
		c-0.068,0.748-0.316,1.324-0.583,1.874c0.004,0.068,0.107,0.034,0.086,0.127c-0.259-0.032-0.072,0.374-0.252,0.415
		c-0.032,0.129,0.145,0.051,0.085,0.207c-0.217,0.079-0.148,0.438-0.377,0.503c-0.087,0.566-0.241,1.063-0.583,1.373
		c0.018,0.553-0.46,1.044-0.082,1.458c-0.231,0.099,0.161,0.397-0.086,0.545c0.236,0.558,0.375,1.206,0.419,1.955
		c0.087,0.701,0.338,0.935,0.332,1.836c0.154,0,0.1,0.209,0.209,0.251c-0.021,0.686,0.224,1.108,0.335,1.667
		c-0.127-0.043-0.075,0.092-0.086,0.166c0.388,0.32,0.189,1.229,0.583,1.542v0.292c0.351,0.457,0.351,1.263,0.709,1.707
		c0.047,0.483,0.206,0.849,0.459,1.123v0.335c0.201,0.131,0.313,0.355,0.332,0.667c0.273,0.05,0.301,0.339,0.335,0.627
		c0.201,0.062,0.339,0.187,0.377,0.415c0.189-0.054,0.137,0.138,0.333,0.086c-0.063,0.183,0.119,0.127,0.078,0.289
		c0.537,0.098,1.543,0.225,1.795-0.208c0.437-0.064,0.556-0.442,0.917-0.582c-0.012-0.218,0.019-0.4,0.251-0.374
		c-0.079-0.329,0.243-0.258,0.165-0.584c0.169,0.057,0.132-0.092,0.252-0.085c0.271-0.841,0.515-1.705,0.873-2.459
		c-0.016-0.106-0.133-0.113-0.082-0.289c0.358-0.548,0.226-1.581,0.251-2.464c0.457-0.26-0.093-1.049,0.208-1.497
		c-0.047-0.223-0.111-0.257,0-0.458c-0.449-0.565-0.102-1.924-0.459-2.584c0.266-0.278-0.268-0.641,0-0.916
		c-0.381-0.693-0.203-1.584-0.583-2.292c0.075,0.004,0.096-0.038,0.083-0.124c-0.248-1.046-0.669-1.918-0.708-3.168
		c-0.313-0.244-0.224-0.889-0.502-1.17c0.021-0.063,0.132-0.032,0.084-0.163c-0.228-0.147-0.306-0.446-0.291-0.837
		c-0.295-0.273-0.32-0.815-0.417-1.288c-0.303-0.266-0.221-0.919-0.542-1.168c0.007-0.326-0.078-0.562-0.25-0.709
		c-0.017-0.303-0.07-0.57-0.249-0.708c0.018-0.064,0.13-0.036,0.085-0.167c-0.253-0.276-0.431-0.628-0.421-1.166
		C753.532,68.236,753.398,67.901,753.381,67.439"/>
	<path fill-rule="evenodd" clip-rule="evenodd" fill="#231F20" d="M740.675,63.357c-0.494-0.092-0.275-0.894-0.585-1.17v-1.333
		c0.259-0.379,0.132-1.146,0.543-1.371c0.383,0.226,0.308,0.913,0.5,1.332C741.154,61.833,741.145,62.823,740.675,63.357"/>
</g>
<g>
	<rect x="28.574" y="559.987" fill="none" width="784.929" height="35.289"/>
	<path fill="#231F20" d="M30.148,567.994h-0.834v-6.179h2.78c1.145,0,1.833,0.739,1.833,1.738c0,0.86-0.491,1.833-1.833,1.833
		h-1.945V567.994z M30.148,564.671h1.661c0.749,0,1.256-0.275,1.256-1.109c0-0.783-0.533-1.033-1.222-1.033h-1.696V564.671z"/>
	<path fill="#231F20" d="M35.655,567.994h-0.757v-4.501h0.714v0.749h0.017c0.301-0.525,0.697-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V567.994z"/>
	<path fill="#231F20" d="M39.569,563.364c1.437,0,2.091,1.213,2.091,2.375s-0.654,2.375-2.091,2.375
		c-1.438,0-2.091-1.213-2.091-2.375S38.131,563.364,39.569,563.364z M39.569,567.46c1.084,0,1.308-1.119,1.308-1.722
		s-0.224-1.721-1.308-1.721c-1.084,0-1.308,1.118-1.308,1.721S38.484,567.46,39.569,567.46z"/>
	<path fill="#231F20" d="M45.488,565.007c-0.095-0.603-0.43-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.947-0.378,1.076-1.066h0.731c-0.078,0.714-0.516,1.721-1.833,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.179,0,1.626,0.86,1.687,1.644H45.488z"/>
	<path fill="#231F20" d="M50.929,566.583c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H50.929z M50.207,565.386c-0.043-0.697-0.336-1.343-1.257-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H50.207z"/>
	<path fill="#231F20" d="M55.806,566.583c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H55.806z M55.083,565.386c-0.043-0.697-0.336-1.343-1.257-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H55.083z"/>
	<path fill="#231F20" d="M60.618,567.994h-0.714v-0.619h-0.017c-0.327,0.61-0.843,0.748-1.282,0.748
		c-1.532,0-1.979-1.438-1.979-2.513c0-1.265,0.68-2.246,1.876-2.246c0.818,0,1.162,0.508,1.343,0.766l0.017-0.06v-2.255h0.757
		V567.994z M58.622,567.468c0.482,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.17,0-1.23,1.11-1.23,1.687
		C57.408,566.72,57.795,567.468,58.622,567.468z"/>
	<path fill="#231F20" d="M61.798,562.675v-0.86h0.757v0.86H61.798z M62.556,567.994h-0.757v-4.501h0.757V567.994z"/>
	<path fill="#231F20" d="M67.429,567.994h-0.757v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.164-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M72.367,567.624c0,0.646-0.009,2.264-2.091,2.264c-0.534,0-1.575-0.146-1.721-1.343h0.757
		c0.138,0.714,0.835,0.714,1.016,0.714c1.308,0,1.282-1.05,1.282-1.583v-0.189h-0.017v0.034c-0.198,0.318-0.594,0.603-1.196,0.603
		c-1.532,0-1.979-1.438-1.979-2.513c0-1.265,0.68-2.246,1.876-2.246c0.818,0,1.162,0.508,1.343,0.766h0.017v-0.637h0.714V567.624z
		 M70.413,567.468c0.482,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.17,0-1.23,1.11-1.23,1.687
		C69.2,566.72,69.587,567.468,70.413,567.468z"/>
	<path fill="#231F20" d="M76.108,564.775c-0.008-0.284-0.112-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.757,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.782,0-1.911-1.032-1.937-1.574H74c0.026,0.353,0.129,0.92,1.196,0.92c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.481-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H76.108z"/>
	<path fill="#231F20" d="M82.354,563.364c1.437,0,2.091,1.213,2.091,2.375s-0.654,2.375-2.091,2.375
		c-1.438,0-2.091-1.213-2.091-2.375S80.916,563.364,82.354,563.364z M82.354,567.46c1.084,0,1.308-1.119,1.308-1.722
		s-0.224-1.721-1.308-1.721s-1.308,1.118-1.308,1.721S81.269,567.46,82.354,567.46z"/>
	<path fill="#231F20" d="M86.268,564.121v3.873h-0.757v-3.873h-0.62v-0.628h0.62v-0.774c0-0.68,0.43-0.99,1.17-0.99
		c0.112,0,0.224,0.009,0.344,0.018v0.68c-0.095-0.008-0.215-0.017-0.31-0.017c-0.327,0-0.447,0.163-0.447,0.524v0.56h0.757v0.628
		H86.268z"/>
	<path fill="#231F20" d="M91.318,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97H89.95v-0.628h0.611v-1.257h0.757v1.257h0.723v0.628H91.318z"/>
	<path fill="#231F20" d="M96.544,567.994h-0.757v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.256,0.361-1.256,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.017c0.215-0.284,0.594-0.731,1.317-0.731c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M101.567,566.583c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.936,1.032,1.936,2.625H98.34
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H101.567z M100.845,565.386
		c-0.043-0.697-0.336-1.343-1.257-1.343c-0.697,0-1.248,0.646-1.248,1.343H100.845z"/>
	<path fill="#231F20" d="M105.016,564.113c0-1.971,1.438-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.714,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.042,1.162h3.288v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.8-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.31-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.222,1.489H105.016z"/>
	<path fill="#231F20" d="M112.33,567.994h-0.809V563.7h-1.412v-0.603c0.981-0.069,1.386-0.164,1.626-1.153h0.594V567.994z"/>
	<path fill="#231F20" d="M116.758,564.775c-0.008-0.284-0.112-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.757,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.782,0-1.911-1.032-1.937-1.574h0.731c0.026,0.353,0.129,0.92,1.196,0.92c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.481-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H116.758z"/>
	<path fill="#231F20" d="M119.505,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.757v1.257h0.723v0.628H119.505z"/>
	<path fill="#231F20" d="M124.703,566.195l-0.646,1.799h-0.86l2.323-6.179h0.947l2.238,6.179h-0.912l-0.611-1.799H124.703z
		 M126.88,565.455l-0.912-2.719h-0.017l-0.99,2.719H126.88z"/>
	<path fill="#231F20" d="M133.141,567.994h-0.757v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.164-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M138.008,567.994h-0.757v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.164-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M142.115,567.994v-0.654l-0.017-0.017c-0.319,0.524-0.706,0.8-1.429,0.8c-0.663,0-1.463-0.318-1.463-1.394
		v-3.236h0.757v2.986c0,0.74,0.379,0.989,0.87,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H142.115z"/>
	<path fill="#231F20" d="M144.032,564.861c0.034-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.257v2.59
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.129,0.034-0.241,0.086-0.413,0.086
		c-0.671,0-0.774-0.344-0.8-0.688c-0.292,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.344-0.731-1.059-0.731
		c-0.86,0-0.981,0.525-1.033,0.869H144.032z M146.803,565.696c-0.121,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.818,0.783c0.706,0,1.394-0.456,1.394-1.059V565.696z"/>
	<path fill="#231F20" d="M149.687,567.994h-0.757v-6.179h0.757V567.994z"/>
	<path fill="#231F20" d="M157.922,563.673c-0.232-1.084-1.136-1.282-1.729-1.282c-1.119,0-2.022,0.826-2.022,2.453
		c0,1.454,0.517,2.573,2.048,2.573c0.542,0,1.48-0.259,1.764-1.695h0.809c-0.344,2.332-2.238,2.435-2.703,2.435
		c-1.402,0-2.779-0.912-2.779-3.287c0-1.901,1.084-3.219,2.883-3.219c1.592,0,2.418,0.99,2.539,2.022H157.922z"/>
	<path fill="#231F20" d="M161.645,563.364c1.437,0,2.091,1.213,2.091,2.375s-0.654,2.375-2.091,2.375
		c-1.438,0-2.091-1.213-2.091-2.375S160.207,563.364,161.645,563.364z M161.645,567.46c1.084,0,1.308-1.119,1.308-1.722
		s-0.224-1.721-1.308-1.721s-1.308,1.118-1.308,1.721S160.56,567.46,161.645,567.46z"/>
	<path fill="#231F20" d="M168.371,567.994h-0.757v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.431,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.017c0.164-0.241,0.586-0.766,1.36-0.766c0.697,0,1.574,0.284,1.574,1.566V567.994z"/>
	<path fill="#231F20" d="M170.466,564.121v3.873h-0.757v-3.873h-0.62v-0.628h0.62v-0.774c0-0.68,0.43-0.99,1.17-0.99
		c0.112,0,0.225,0.009,0.345,0.018v0.68c-0.095-0.008-0.215-0.017-0.31-0.017c-0.327,0-0.448,0.163-0.448,0.524v0.56h0.758v0.628
		H170.466z"/>
	<path fill="#231F20" d="M175.708,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H175.708z M174.985,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H174.985z"/>
	<path fill="#231F20" d="M177.568,567.994h-0.757v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V567.994z"/>
	<path fill="#231F20" d="M183.449,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H183.449z M182.725,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H182.725z"/>
	<path fill="#231F20" d="M188.162,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M192.219,565.007c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H192.219z"/>
	<path fill="#231F20" d="M197.661,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H197.661z M196.937,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H196.937z"/>
	<path fill="#231F20" d="M203.122,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S201.684,563.364,203.122,563.364z M203.122,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S202.038,567.46,203.122,567.46z"/>
	<path fill="#231F20" d="M207.036,564.121v3.873h-0.757v-3.873h-0.62v-0.628h0.62v-0.774c0-0.68,0.431-0.99,1.17-0.99
		c0.112,0,0.224,0.009,0.345,0.018v0.68c-0.095-0.008-0.215-0.017-0.31-0.017c-0.327,0-0.448,0.163-0.448,0.524v0.56h0.758v0.628
		H207.036z"/>
	<path fill="#231F20" d="M212.086,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H212.086z"/>
	<path fill="#231F20" d="M217.312,567.994h-0.758v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.257,0.361-1.257,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.018c0.215-0.284,0.594-0.731,1.316-0.731c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M222.335,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H222.335z M221.612,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H221.612z"/>
	<path fill="#231F20" d="M227.023,567.994h-0.835v-6.179h0.835V567.994z"/>
	<path fill="#231F20" d="M232.054,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M234.132,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H234.132z"/>
	<path fill="#231F20" d="M239.415,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H239.415z M238.691,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H238.691z"/>
	<path fill="#231F20" d="M241.274,567.994h-0.757v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V567.994z"/>
	<path fill="#231F20" d="M247.074,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M248.238,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.257v2.59
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H248.238z M251.009,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M253.96,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H253.96z"/>
	<path fill="#231F20" d="M255.539,562.675v-0.86h0.758v0.86H255.539z M256.296,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M259.365,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S257.927,563.364,259.365,563.364z M259.365,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S258.28,567.46,259.365,567.46z"/>
	<path fill="#231F20" d="M266.087,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M267.251,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.257v2.59
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H267.251z M270.023,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M272.909,567.994h-0.758v-6.179h0.758V567.994z"/>
	<path fill="#231F20" d="M280.534,563.622c-0.043-0.955-0.835-1.256-1.532-1.256c-0.525,0-1.411,0.146-1.411,1.084
		c0,0.525,0.37,0.697,0.731,0.783l1.764,0.404c0.801,0.189,1.403,0.671,1.403,1.652c0,1.463-1.36,1.867-2.419,1.867
		c-1.145,0-1.592-0.344-1.867-0.594c-0.524-0.473-0.628-0.989-0.628-1.565h0.783c0,1.118,0.912,1.445,1.704,1.445
		c0.603,0,1.617-0.155,1.617-1.041c0-0.646-0.301-0.853-1.316-1.094l-1.265-0.292c-0.405-0.095-1.317-0.379-1.317-1.454
		c0-0.964,0.629-1.911,2.126-1.911c2.16,0,2.375,1.291,2.41,1.971H280.534z"/>
	<path fill="#231F20" d="M282.52,563.493h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.981,1.876,2.246
		c0,1.075-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V563.493z M284.474,567.468
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C283.243,567.15,283.992,567.468,284.474,567.468z"/>
	<path fill="#231F20" d="M291.283,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H291.283z M290.559,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H290.559z"/>
	<path fill="#231F20" d="M296.164,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H296.164z M295.44,565.386c-0.043-0.697-0.335-1.343-1.256-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H295.44z"/>
	<path fill="#231F20" d="M300.075,565.007c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H300.075z"/>
	<path fill="#231F20" d="M305.382,567.994h-0.758v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.257,0.361-1.257,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.018c0.215-0.284,0.594-0.731,1.316-0.731c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M313.598,563.673c-0.232-1.084-1.136-1.282-1.729-1.282c-1.119,0-2.022,0.826-2.022,2.453
		c0,1.454,0.517,2.573,2.048,2.573c0.543,0,1.48-0.259,1.765-1.695h0.809c-0.344,2.332-2.237,2.435-2.702,2.435
		c-1.402,0-2.779-0.912-2.779-3.287c0-1.901,1.084-3.219,2.883-3.219c1.592,0,2.418,0.99,2.539,2.022H313.598z"/>
	<path fill="#231F20" d="M317.32,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S315.882,563.364,317.32,563.364z M317.32,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S316.235,567.46,317.32,567.46z"/>
	<path fill="#231F20" d="M320.38,563.493h0.714v0.637h0.018c0.163-0.241,0.551-0.766,1.359-0.766c0.81,0,1.067,0.49,1.196,0.731
		c0.379-0.422,0.68-0.731,1.377-0.731c0.482,0,1.395,0.249,1.395,1.515v3.115h-0.758v-2.909c0-0.619-0.189-1.041-0.835-1.041
		c-0.637,0-1.058,0.603-1.058,1.205v2.745h-0.758v-3.115c0-0.379-0.146-0.835-0.706-0.835c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757V563.493z"/>
	<path fill="#231F20" d="M327.642,563.493h0.714v0.637h0.018c0.163-0.241,0.551-0.766,1.359-0.766c0.81,0,1.067,0.49,1.196,0.731
		c0.379-0.422,0.68-0.731,1.377-0.731c0.482,0,1.395,0.249,1.395,1.515v3.115h-0.758v-2.909c0-0.619-0.189-1.041-0.835-1.041
		c-0.637,0-1.058,0.603-1.058,1.205v2.745h-0.758v-3.115c0-0.379-0.146-0.835-0.706-0.835c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757V563.493z"/>
	<path fill="#231F20" d="M337.809,567.994v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.378,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H337.809z"/>
	<path fill="#231F20" d="M343.404,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M344.612,562.675v-0.86h0.758v0.86H344.612z M345.37,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M349.45,565.007c-0.094-0.603-0.43-0.964-1.066-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.075-1.066h0.732c-0.078,0.714-0.517,1.721-1.834,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H349.45z"/>
	<path fill="#231F20" d="M351.065,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.257v2.59
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H351.065z M353.836,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M356.788,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H356.788z"/>
	<path fill="#231F20" d="M358.366,562.675v-0.86h0.758v0.86H358.366z M359.124,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M362.192,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S360.754,563.364,362.192,563.364z M362.192,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S361.108,567.46,362.192,567.46z"/>
	<path fill="#231F20" d="M368.914,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M373.765,566.195l-0.646,1.799h-0.861l2.324-6.179h0.946l2.237,6.179h-0.912l-0.61-1.799H373.765z
		 M375.942,565.455l-0.912-2.719h-0.018l-0.989,2.719H375.942z"/>
	<path fill="#231F20" d="M381.064,564.775c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.574h0.731c0.025,0.353,0.129,0.92,1.196,0.92c0.542,0,1.032-0.215,1.032-0.714
		c0-0.361-0.249-0.481-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H381.064z"/>
	<path fill="#231F20" d="M385.491,564.775c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.574h0.731c0.026,0.353,0.129,0.92,1.196,0.92c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.481-0.896-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H385.491z"/>
	<path fill="#231F20" d="M389.158,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S387.72,563.364,389.158,563.364z M389.158,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S388.073,567.46,389.158,567.46z"/>
	<path fill="#231F20" d="M395.077,565.007c-0.094-0.603-0.43-0.964-1.066-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.075-1.066h0.732c-0.078,0.714-0.517,1.721-1.834,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H395.077z"/>
	<path fill="#231F20" d="M396.736,562.675v-0.86h0.758v0.86H396.736z M397.494,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M398.685,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.257v2.59
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H398.685z M401.457,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M404.408,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H404.408z"/>
	<path fill="#231F20" d="M405.986,562.675v-0.86h0.758v0.86H405.986z M406.744,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M409.812,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S408.375,563.364,409.812,563.364z M409.812,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S408.727,567.46,409.812,567.46z"/>
	<path fill="#231F20" d="M416.534,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M422.323,561.685c-0.731,1.377-1.17,2.203-1.17,4.269c0,1.421,0.49,2.471,1.179,3.821h-0.508
		c-0.896-1.343-1.48-2.418-1.48-4.027c0-1.515,0.525-2.736,1.455-4.063H422.323z"/>
	<path fill="#231F20" d="M424.441,567.994h-0.835v-6.179h0.835V567.994z"/>
	<path fill="#231F20" d="M430.006,561.815h0.81v6.179h-0.938l-3.141-4.991h-0.018v4.991h-0.809v-6.179h0.989l3.09,4.991h0.017
		V561.815z"/>
	<path fill="#231F20" d="M431.655,561.815h5.018v0.739h-2.091v5.439h-0.835v-5.439h-2.092V561.815z"/>
	<path fill="#231F20" d="M442.12,567.994h-4.562v-6.179h4.501v0.739h-3.666v1.894h3.382v0.74h-3.382v2.065h3.727V567.994z"/>
	<path fill="#231F20" d="M444.078,567.994h-0.835v-6.179h2.857c1.016,0,2.031,0.353,2.031,1.66c0,0.913-0.465,1.248-0.86,1.48
		c0.353,0.146,0.705,0.302,0.74,1.162l0.052,1.118c0.008,0.345,0.051,0.474,0.31,0.62v0.138h-1.024
		c-0.12-0.379-0.146-1.316-0.146-1.549c0-0.508-0.104-1.102-1.102-1.102h-2.022V567.994z M444.078,564.628h1.937
		c0.611,0,1.257-0.154,1.257-1.066c0-0.956-0.697-1.033-1.11-1.033h-2.083V564.628z"/>
	<path fill="#231F20" d="M453.111,563.622c-0.044-0.955-0.835-1.256-1.532-1.256c-0.525,0-1.411,0.146-1.411,1.084
		c0,0.525,0.37,0.697,0.731,0.783l1.764,0.404c0.801,0.189,1.403,0.671,1.403,1.652c0,1.463-1.36,1.867-2.419,1.867
		c-1.145,0-1.592-0.344-1.867-0.594c-0.524-0.473-0.628-0.989-0.628-1.565h0.783c0,1.118,0.912,1.445,1.704,1.445
		c0.602,0,1.617-0.155,1.617-1.041c0-0.646-0.301-0.853-1.316-1.094l-1.265-0.292c-0.405-0.095-1.317-0.379-1.317-1.454
		c0-0.964,0.629-1.911,2.126-1.911c2.16,0,2.375,1.291,2.409,1.971H453.111z"/>
	<path fill="#231F20" d="M456.1,567.994h-0.835v-6.179h2.779c1.145,0,1.833,0.739,1.833,1.738c0,0.86-0.49,1.833-1.833,1.833H456.1
		V567.994z M456.1,564.671h1.66c0.749,0,1.257-0.275,1.257-1.109c0-0.783-0.533-1.033-1.222-1.033H456.1V564.671z"/>
	<path fill="#231F20" d="M465.465,567.994h-4.562v-6.179h4.501v0.739h-3.666v1.894h3.382v0.74h-3.382v2.065h3.727V567.994z"/>
	<path fill="#231F20" d="M471.141,567.994h-4.562v-6.179h4.501v0.739h-3.666v1.894h3.382v0.74h-3.382v2.065h3.727V567.994z"/>
	<path fill="#231F20" d="M476.453,563.673c-0.232-1.084-1.136-1.282-1.729-1.282c-1.119,0-2.022,0.826-2.022,2.453
		c0,1.454,0.517,2.573,2.048,2.573c0.543,0,1.48-0.259,1.765-1.695h0.809c-0.344,2.332-2.237,2.435-2.702,2.435
		c-1.402,0-2.779-0.912-2.779-3.287c0-1.901,1.084-3.219,2.883-3.219c1.592,0,2.418,0.99,2.539,2.022H476.453z"/>
	<path fill="#231F20" d="M482.467,561.815h0.835v6.179h-0.835v-2.883h-3.228v2.883h-0.834v-6.179h0.834v2.556h3.228V561.815z"/>
	<path fill="#231F20" d="M487,564.113c0-1.971,1.437-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.714,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.041,1.162h3.287v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.801-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.311-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.223,1.489H487z"/>
	<path fill="#231F20" d="M493.842,568.157c-1.738,0-2.074-1.85-2.074-3.106c0-1.256,0.336-3.106,2.074-3.106
		s2.074,1.851,2.074,3.106C495.917,566.307,495.581,568.157,493.842,568.157z M493.842,562.624c-0.869,0-1.266,0.912-1.266,2.427
		s0.396,2.427,1.266,2.427s1.265-0.912,1.265-2.427S494.711,562.624,493.842,562.624z"/>
	<path fill="#231F20" d="M496.738,564.113c0-1.971,1.437-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.714,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.041,1.162h3.287v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.801-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.311-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.223,1.489H496.738z"/>
	<path fill="#231F20" d="M503.582,568.157c-1.738,0-2.074-1.85-2.074-3.106c0-1.256,0.336-3.106,2.074-3.106
		s2.074,1.851,2.074,3.106C505.656,566.307,505.32,568.157,503.582,568.157z M503.582,562.624c-0.869,0-1.266,0.912-1.266,2.427
		s0.396,2.427,1.266,2.427s1.265-0.912,1.265-2.427S504.451,562.624,503.582,562.624z"/>
	<path fill="#231F20" d="M506.559,569.775c0.732-1.377,1.171-2.203,1.171-4.269c0-1.42-0.49-2.47-1.179-3.821h0.508
		c0.895,1.343,1.479,2.419,1.479,4.028c0,1.515-0.524,2.736-1.454,4.062H506.559z"/>
	<path fill="#231F20" d="M510.977,567.994h-0.895v-0.912h0.895V567.994z"/>
	<path fill="#231F20" d="M518.68,567.994h-0.835v-6.179h0.835V567.994z"/>
	<path fill="#231F20" d="M523.833,563.622c-0.044-0.955-0.835-1.256-1.532-1.256c-0.525,0-1.411,0.146-1.411,1.084
		c0,0.525,0.37,0.697,0.731,0.783l1.764,0.404c0.801,0.189,1.403,0.671,1.403,1.652c0,1.463-1.36,1.867-2.419,1.867
		c-1.145,0-1.592-0.344-1.867-0.594c-0.524-0.473-0.628-0.989-0.628-1.565h0.783c0,1.118,0.912,1.445,1.704,1.445
		c0.602,0,1.617-0.155,1.617-1.041c0-0.646-0.301-0.853-1.316-1.094l-1.265-0.292c-0.405-0.095-1.317-0.379-1.317-1.454
		c0-0.964,0.629-1.911,2.126-1.911c2.16,0,2.375,1.291,2.409,1.971H523.833z"/>
	<path fill="#231F20" d="M529.615,563.622c-0.044-0.955-0.835-1.256-1.532-1.256c-0.525,0-1.411,0.146-1.411,1.084
		c0,0.525,0.37,0.697,0.731,0.783l1.764,0.404c0.801,0.189,1.403,0.671,1.403,1.652c0,1.463-1.36,1.867-2.419,1.867
		c-1.145,0-1.592-0.344-1.867-0.594c-0.524-0.473-0.628-0.989-0.628-1.565h0.783c0,1.118,0.912,1.445,1.704,1.445
		c0.602,0,1.617-0.155,1.617-1.041c0-0.646-0.301-0.853-1.316-1.094l-1.265-0.292c-0.405-0.095-1.317-0.379-1.317-1.454
		c0-0.964,0.629-1.911,2.126-1.911c2.16,0,2.375,1.291,2.409,1.971H529.615z"/>
	<path fill="#231F20" d="M535.819,561.815h0.81v6.179h-0.938l-3.141-4.991h-0.018v4.991h-0.809v-6.179h0.989l3.09,4.991h0.017
		V561.815z"/>
	<path fill="#231F20" d="M540.326,564.113c0-1.971,1.437-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.714,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.041,1.162h3.287v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.801-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.311-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.223,1.489H540.326z"/>
	<path fill="#231F20" d="M546.679,564.551c0.129,0.009,0.267,0.018,0.396,0.018c0.586,0,1.153-0.232,1.153-0.99
		c0-0.361-0.215-0.955-1.118-0.955c-1.076,0-1.145,0.878-1.18,1.3h-0.74c0-0.887,0.362-1.979,1.954-1.979
		c1.17,0,1.867,0.671,1.867,1.592c0,0.774-0.447,1.145-0.774,1.248v0.018c0.585,0.188,1.007,0.602,1.007,1.411
		c0,0.989-0.637,1.944-2.177,1.944c-0.448,0-0.826-0.111-1.119-0.267c-0.671-0.353-0.852-1.05-0.903-1.747h0.783
		c0.025,0.568,0.163,1.334,1.29,1.334c0.775,0,1.317-0.473,1.317-1.17c0-1.016-0.896-1.11-1.411-1.11
		c-0.112,0-0.232,0.009-0.345,0.009V564.551z"/>
	<path fill="#231F20" d="M552.11,568.157c-1.738,0-2.074-1.85-2.074-3.106c0-1.256,0.336-3.106,2.074-3.106s2.074,1.851,2.074,3.106
		C554.184,566.307,553.848,568.157,552.11,568.157z M552.11,562.624c-0.869,0-1.266,0.912-1.266,2.427s0.396,2.427,1.266,2.427
		s1.265-0.912,1.265-2.427S552.979,562.624,552.11,562.624z"/>
	<path fill="#231F20" d="M558.126,564.775c0.817,0.345,0.989,1.016,0.989,1.472c0,0.973-0.619,1.91-2.048,1.91
		c-0.336,0-0.973-0.086-1.455-0.464c-0.619-0.491-0.619-1.145-0.619-1.438c0-0.74,0.387-1.214,1.024-1.472
		c-0.517-0.198-0.818-0.611-0.818-1.179c0-0.629,0.388-1.661,1.833-1.661c1.343,0,1.868,0.852,1.868,1.558
		C558.901,564.371,558.393,564.637,558.126,564.775z M555.802,566.273c0,0.481,0.241,1.204,1.291,1.204
		c0.551,0,1.214-0.197,1.214-1.145c0-0.817-0.568-1.179-1.257-1.179C556.199,565.154,555.802,565.713,555.802,566.273z
		 M558.118,563.527c0-0.37-0.241-0.903-1.128-0.903c-0.809,0-1.007,0.56-1.007,0.946c0,0.577,0.508,0.93,1.059,0.93
		C557.696,564.5,558.118,564.078,558.118,563.527z"/>
	<path fill="#231F20" d="M562.188,565.998h-2.108v-0.775h2.108V565.998z"/>
	<path fill="#231F20" d="M566.499,566.522v1.472h-0.757v-1.472h-2.643v-0.74l2.771-3.838h0.628v3.924h0.887v0.654H566.499z
		 M563.796,565.868h1.945v-2.745h-0.018L563.796,565.868z"/>
	<path fill="#231F20" d="M569.096,564.474c0.259-0.198,0.629-0.379,1.171-0.379c0.981,0,1.988,0.688,1.988,1.937
		c0,0.671-0.302,2.125-2.195,2.125c-0.791,0-1.807-0.318-1.953-1.669h0.783c0.077,0.705,0.603,1.016,1.282,1.016
		c0.783,0,1.273-0.629,1.273-1.386c0-0.869-0.594-1.343-1.342-1.343c-0.439,0-0.835,0.207-1.119,0.577l-0.654-0.035l0.456-3.244
		h3.133v0.74h-2.564L569.096,564.474z"/>
	<path fill="#231F20" d="M572.882,562.073h4.183v0.663c-0.603,0.628-2.048,2.556-2.487,5.258h-0.835
		c0.207-1.661,1.309-3.83,2.471-5.181h-3.331V562.073z"/>
	<path fill="#231F20" d="M583.01,567.994h-1.032l-1.661-2.539l-1.713,2.539h-1.007l2.212-3.167l-2.074-3.012h1.051l1.565,2.366
		l1.566-2.366h0.999l-2.074,3.012L583.01,567.994z"/>
	<path fill="#231F20" d="M584.947,567.994h-0.895v-0.912h0.895V567.994z"/>
	<path fill="#231F20" d="M588.243,564.896c0-1.868,1.515-3.245,3.305-3.245c1.772,0,3.287,1.377,3.287,3.245
		c0,1.884-1.515,3.261-3.287,3.261C589.757,568.157,588.243,566.78,588.243,564.896z M591.547,567.615
		c1.472,0,2.633-1.153,2.633-2.719c0-1.541-1.161-2.703-2.633-2.703c-1.489,0-2.651,1.162-2.651,2.703
		C588.896,566.461,590.058,567.615,591.547,567.615z M593.259,565.506c-0.163,0.853-0.809,1.386-1.601,1.386
		c-1.17,0-1.919-0.878-1.919-2.005c0-1.145,0.714-1.988,1.885-1.988c0.826,0,1.506,0.474,1.626,1.352h-0.576
		c-0.104-0.474-0.499-0.783-1.041-0.783c-0.81,0-1.239,0.61-1.239,1.411c0,0.774,0.481,1.445,1.265,1.445
		c0.542,0,0.938-0.335,1.016-0.817H593.259z"/>
	<path fill="#231F20" d="M597.749,564.113c0-1.971,1.437-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.715,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.041,1.162h3.287v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.801-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.311-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.223,1.489H597.749z"/>
	<path fill="#231F20" d="M604.591,568.157c-1.738,0-2.074-1.85-2.074-3.106c0-1.256,0.336-3.106,2.074-3.106
		s2.074,1.851,2.074,3.106C606.666,566.307,606.33,568.157,604.591,568.157z M604.591,562.624c-0.869,0-1.266,0.912-1.266,2.427
		s0.396,2.427,1.266,2.427s1.265-0.912,1.265-2.427S605.46,562.624,604.591,562.624z"/>
	<path fill="#231F20" d="M607.487,564.113c0-1.971,1.437-2.169,2.091-2.169c1.05,0,1.894,0.68,1.894,1.807
		c0,1.085-0.715,1.541-1.618,2.022l-0.628,0.345c-0.826,0.456-1.007,0.93-1.041,1.162h3.287v0.714h-4.14
		c0.043-1.257,0.611-1.928,1.42-2.401l0.801-0.465c0.646-0.37,1.11-0.619,1.11-1.411c0-0.481-0.311-1.093-1.196-1.093
		c-1.145,0-1.196,1.067-1.223,1.489H607.487z"/>
	<path fill="#231F20" d="M614.331,568.157c-1.738,0-2.074-1.85-2.074-3.106c0-1.256,0.336-3.106,2.074-3.106
		s2.074,1.851,2.074,3.106C616.405,566.307,616.069,568.157,614.331,568.157z M614.331,562.624c-0.869,0-1.266,0.912-1.266,2.427
		s0.396,2.427,1.266,2.427s1.265-0.912,1.265-2.427S615.2,562.624,614.331,562.624z"/>
	<path fill="#231F20" d="M621.005,567.994h-0.835v-6.179h0.835V567.994z"/>
	<path fill="#231F20" d="M626.038,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M628.116,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H628.116z"/>
	<path fill="#231F20" d="M633.398,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H633.398z M632.674,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H632.674z"/>
	<path fill="#231F20" d="M635.257,567.994H634.5v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V567.994z"/>
	<path fill="#231F20" d="M641.057,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M642.221,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.257v2.59
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H642.221z M644.993,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M647.944,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H647.944z"/>
	<path fill="#231F20" d="M649.522,562.675v-0.86h0.758v0.86H649.522z M650.28,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M653.348,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S651.911,563.364,653.348,563.364z M653.348,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S652.264,567.46,653.348,567.46z"/>
	<path fill="#231F20" d="M660.07,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M661.235,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.257v2.59
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H661.235z M664.006,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M666.892,567.994h-0.758v-6.179h0.758V567.994z"/>
	<path fill="#231F20" d="M674.517,563.622c-0.044-0.955-0.835-1.256-1.532-1.256c-0.525,0-1.411,0.146-1.411,1.084
		c0,0.525,0.37,0.697,0.731,0.783l1.764,0.404c0.801,0.189,1.403,0.671,1.403,1.652c0,1.463-1.36,1.867-2.419,1.867
		c-1.145,0-1.592-0.344-1.867-0.594c-0.524-0.473-0.628-0.989-0.628-1.565h0.783c0,1.118,0.912,1.445,1.704,1.445
		c0.602,0,1.617-0.155,1.617-1.041c0-0.646-0.301-0.853-1.316-1.094l-1.265-0.292c-0.405-0.095-1.317-0.379-1.317-1.454
		c0-0.964,0.629-1.911,2.126-1.911c2.16,0,2.375,1.291,2.409,1.971H674.517z"/>
	<path fill="#231F20" d="M676.503,563.493h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.981,1.876,2.246
		c0,1.075-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V563.493z M678.458,567.468
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C677.226,567.15,677.975,567.468,678.458,567.468z"/>
	<path fill="#231F20" d="M685.266,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H685.266z M684.542,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H684.542z"/>
	<path fill="#231F20" d="M690.147,566.583c-0.026,0.215-0.232,0.852-0.801,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H690.147z M689.423,565.386
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H689.423z"/>
	<path fill="#231F20" d="M694.057,565.007c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H694.057z"/>
	<path fill="#231F20" d="M699.365,567.994h-0.758v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.257,0.361-1.257,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.018c0.215-0.284,0.594-0.731,1.316-0.731c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M707.581,563.673c-0.232-1.084-1.136-1.282-1.729-1.282c-1.119,0-2.022,0.826-2.022,2.453
		c0,1.454,0.517,2.573,2.048,2.573c0.543,0,1.48-0.259,1.765-1.695h0.809c-0.344,2.332-2.237,2.435-2.702,2.435
		c-1.402,0-2.779-0.912-2.779-3.287c0-1.901,1.084-3.219,2.883-3.219c1.592,0,2.418,0.99,2.539,2.022H707.581z"/>
	<path fill="#231F20" d="M711.303,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S709.866,563.364,711.303,563.364z M711.303,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S710.219,567.46,711.303,567.46z"/>
	<path fill="#231F20" d="M714.363,563.493h0.714v0.637h0.018c0.163-0.241,0.551-0.766,1.359-0.766c0.81,0,1.067,0.49,1.196,0.731
		c0.379-0.422,0.68-0.731,1.377-0.731c0.482,0,1.395,0.249,1.395,1.515v3.115h-0.758v-2.909c0-0.619-0.189-1.041-0.835-1.041
		c-0.637,0-1.058,0.603-1.058,1.205v2.745h-0.758v-3.115c0-0.379-0.146-0.835-0.706-0.835c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757V563.493z"/>
	<path fill="#231F20" d="M721.625,563.493h0.714v0.637h0.018c0.163-0.241,0.551-0.766,1.359-0.766c0.81,0,1.067,0.49,1.196,0.731
		c0.379-0.422,0.68-0.731,1.377-0.731c0.482,0,1.395,0.249,1.395,1.515v3.115h-0.758v-2.909c0-0.619-0.189-1.041-0.834-1.041
		c-0.638,0-1.059,0.603-1.059,1.205v2.745h-0.758v-3.115c0-0.379-0.146-0.835-0.706-0.835c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757V563.493z"/>
	<path fill="#231F20" d="M731.792,567.994v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.379,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H731.792z"/>
	<path fill="#231F20" d="M737.387,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M738.595,562.675v-0.86h0.758v0.86H738.595z M739.353,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M743.434,565.007c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H743.434z"/>
	<path fill="#231F20" d="M745.047,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.257v2.59
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H745.047z M747.819,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M750.77,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H750.77z"/>
	<path fill="#231F20" d="M752.348,562.675v-0.86h0.758v0.86H752.348z M753.106,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M756.174,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S754.737,563.364,756.174,563.364z M756.174,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S755.09,567.46,756.174,567.46z"/>
	<path fill="#231F20" d="M762.897,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M767.748,566.195l-0.646,1.799h-0.861l2.324-6.179h0.946l2.237,6.179h-0.912l-0.61-1.799H767.748z
		 M769.924,565.455l-0.912-2.719h-0.018l-0.989,2.719H769.924z"/>
	<path fill="#231F20" d="M775.047,564.775c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.574h0.731c0.026,0.353,0.129,0.92,1.196,0.92c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.481-0.896-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H775.047z"/>
	<path fill="#231F20" d="M779.473,564.775c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.189c0.938,0.231,1.265,0.576,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.574h0.731c0.026,0.353,0.129,0.92,1.196,0.92c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.481-0.896-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H779.473z"/>
	<path fill="#231F20" d="M783.141,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S781.704,563.364,783.141,563.364z M783.141,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S782.057,567.46,783.141,567.46z"/>
	<path fill="#231F20" d="M789.059,565.007c-0.094-0.603-0.43-0.964-1.066-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.075-1.066h0.732c-0.078,0.714-0.517,1.721-1.834,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H789.059z"/>
	<path fill="#231F20" d="M790.718,562.675v-0.86h0.758v0.86H790.718z M791.476,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M792.668,564.861c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.257v2.59
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.299
		c0-0.379,0.112-1.257,1.368-1.412l1.248-0.154c0.181-0.018,0.396-0.086,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H792.668z M795.44,565.696c-0.12,0.095-0.31,0.163-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V565.696z"/>
	<path fill="#231F20" d="M798.391,564.121v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H798.391z"/>
	<path fill="#231F20" d="M799.969,562.675v-0.86h0.758v0.86H799.969z M800.727,567.994h-0.758v-4.501h0.758V567.994z"/>
	<path fill="#231F20" d="M803.795,563.364c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S802.358,563.364,803.795,563.364z M803.795,567.46c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S802.711,567.46,803.795,567.46z"/>
	<path fill="#231F20" d="M810.517,567.994h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V567.994z"/>
	<path fill="#231F20" d="M812.727,567.994h-0.895v-0.912h0.895V567.994z"/>
	<path fill="#231F20" d="M30.2,576.514l-0.646,1.799h-0.86l2.324-6.179h0.946l2.238,6.179H33.29l-0.611-1.799H30.2z M32.377,575.774
		l-0.912-2.719h-0.017l-0.99,2.719H32.377z"/>
	<path fill="#231F20" d="M35.74,578.313h-0.757v-6.179h0.757V578.313z"/>
	<path fill="#231F20" d="M37.712,578.313h-0.757v-6.179h0.757V578.313z"/>
	<path fill="#231F20" d="M42.098,578.313h-0.757v-4.501h0.714v0.749h0.017c0.301-0.525,0.697-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M44.241,572.995v-0.86h0.757v0.86H44.241z M44.998,578.313h-0.757v-4.501h0.757V578.313z"/>
	<path fill="#231F20" d="M49.959,577.943c0,0.646-0.009,2.264-2.091,2.264c-0.534,0-1.575-0.146-1.721-1.343h0.757
		c0.138,0.714,0.835,0.714,1.016,0.714c1.308,0,1.282-1.05,1.282-1.583v-0.189h-0.017v0.034c-0.198,0.318-0.594,0.603-1.196,0.603
		c-1.532,0-1.979-1.437-1.979-2.513c0-1.266,0.68-2.246,1.876-2.246c0.818,0,1.162,0.508,1.343,0.766h0.017v-0.637h0.714V577.943z
		 M48.005,577.788c0.482,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.17,0-1.23,1.11-1.23,1.687
		C46.792,577.04,47.179,577.788,48.005,577.788z"/>
	<path fill="#231F20" d="M54.794,578.313h-0.757v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.256,0.361-1.256,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.017c0.215-0.284,0.594-0.731,1.317-0.731c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M56.868,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97H55.5v-0.628h0.611v-1.257h0.757v1.257h0.723v0.628H56.868z"/>
	<path fill="#231F20" d="M60.986,575.094c-0.008-0.284-0.112-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.757,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.782,0-1.911-1.032-1.937-1.575h0.731c0.026,0.354,0.129,0.921,1.196,0.921c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H60.986z"/>
	<path fill="#231F20" d="M65.981,578.313h-0.757v-4.501h0.714v0.749h0.017c0.301-0.525,0.697-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M71.863,576.902c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H71.863z M71.14,575.706c-0.043-0.697-0.336-1.343-1.257-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H71.14z"/>
	<path fill="#231F20" d="M75.47,575.094c-0.008-0.284-0.112-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.757,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.782,0-1.911-1.032-1.937-1.575h0.731c0.026,0.354,0.129,0.921,1.196,0.921c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H75.47z"/>
	<path fill="#231F20" d="M81.112,576.902c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H81.112z M80.389,575.706c-0.043-0.697-0.336-1.343-1.257-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H80.389z"/>
	<path fill="#231F20" d="M82.972,578.313h-0.757v-4.501h0.714v0.749h0.017c0.301-0.525,0.697-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M88.185,573.812h0.826l-1.713,4.501h-0.809l-1.644-4.501h0.878l1.17,3.666h0.017L88.185,573.812z"/>
	<path fill="#231F20" d="M93.388,576.902c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H93.388z M92.666,575.706c-0.043-0.697-0.336-1.343-1.257-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H92.666z"/>
	<path fill="#231F20" d="M98.2,578.313h-0.714v-0.62h-0.017c-0.327,0.611-0.843,0.749-1.282,0.749c-1.532,0-1.979-1.437-1.979-2.513
		c0-1.266,0.68-2.246,1.876-2.246c0.818,0,1.162,0.508,1.343,0.766l0.017-0.061v-2.254H98.2V578.313z M96.204,577.788
		c0.482,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.17,0-1.23,1.11-1.23,1.687
		C94.99,577.04,95.377,577.788,96.204,577.788z"/>
	<path fill="#231F20" d="M100.374,578.313h-0.895v-0.912h0.895V578.313z"/>
	<path fill="#231F20" d="M107.477,578.313h-0.834v-6.179h4.277v0.74h-3.442v1.893h3.029v0.74h-3.029V578.313z"/>
	<path fill="#231F20" d="M113.503,573.683c1.437,0,2.091,1.213,2.091,2.375s-0.654,2.375-2.091,2.375
		c-1.438,0-2.091-1.213-2.091-2.375S112.065,573.683,113.503,573.683z M113.503,577.78c1.084,0,1.308-1.119,1.308-1.722
		s-0.224-1.721-1.308-1.721s-1.308,1.118-1.308,1.721S112.418,577.78,113.503,577.78z"/>
	<path fill="#231F20" d="M117.371,578.313h-0.757v-4.501h0.714v0.749h0.017c0.301-0.525,0.697-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M122.694,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.757v1.257h0.723v0.628H122.694z"/>
	<path fill="#231F20" d="M127.976,576.902c-0.026,0.215-0.232,0.852-0.8,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.299,0,1.937,1.032,1.937,2.625h-3.288
		c0,0.938,0.439,1.479,1.308,1.479c0.714,0,1.136-0.551,1.162-0.886H127.976z M127.253,575.706
		c-0.043-0.697-0.336-1.343-1.257-1.343c-0.697,0-1.248,0.646-1.248,1.343H127.253z"/>
	<path fill="#231F20" d="M131.887,575.327c-0.095-0.603-0.43-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.947-0.378,1.076-1.066h0.731c-0.078,0.714-0.516,1.721-1.833,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.179,0,1.626,0.86,1.687,1.644H131.887z"/>
	<path fill="#231F20" d="M137.195,578.313h-0.757v-2.943c0-0.611-0.172-1.007-0.895-1.007c-0.62,0-1.256,0.361-1.256,1.497v2.453
		h-0.757v-6.179h0.757v2.28h0.017c0.215-0.284,0.594-0.731,1.317-0.731c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M142.062,578.313h-0.757v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.164-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M143.271,572.995v-0.86h0.757v0.86H143.271z M144.029,578.313h-0.757v-4.501h0.757V578.313z"/>
	<path fill="#231F20" d="M148.109,575.327c-0.095-0.603-0.43-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.231,1.729c0.508,0,0.947-0.378,1.076-1.066h0.731c-0.077,0.714-0.516,1.721-1.833,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.16-2.504c1.179,0,1.626,0.86,1.687,1.644H148.109z"/>
	<path fill="#231F20" d="M149.724,575.18c0.034-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.256v2.591
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.129,0.034-0.241,0.086-0.413,0.086
		c-0.671,0-0.774-0.344-0.8-0.688c-0.292,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.3
		c0-0.378,0.112-1.256,1.369-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.344-0.731-1.059-0.731
		c-0.86,0-0.981,0.525-1.033,0.869H149.724z M152.495,576.015c-0.121,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.818,0.783c0.706,0,1.394-0.456,1.394-1.059V576.015z"/>
	<path fill="#231F20" d="M155.379,578.313h-0.757v-6.179h0.757V578.313z"/>
	<path fill="#231F20" d="M161.46,575.094c-0.008-0.284-0.112-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.757,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.782,0-1.911-1.032-1.937-1.575h0.731c0.026,0.354,0.129,0.921,1.196,0.921c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H161.46z"/>
	<path fill="#231F20" d="M166.202,578.313v-0.654l-0.017-0.017c-0.318,0.524-0.706,0.8-1.429,0.8c-0.663,0-1.463-0.318-1.463-1.394
		v-3.236h0.757v2.986c0,0.74,0.379,0.989,0.869,0.989c0.956,0,1.239-0.843,1.239-1.497v-2.479h0.758v4.501H166.202z"/>
	<path fill="#231F20" d="M168.098,573.812h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.98,1.876,2.246
		c0,1.076-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V573.812z M170.052,577.788
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C168.821,577.469,169.57,577.788,170.052,577.788z"/>
	<path fill="#231F20" d="M173.011,573.812h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.98,1.876,2.246
		c0,1.076-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V573.812z M174.965,577.788
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C173.734,577.469,174.483,577.788,174.965,577.788z"/>
	<path fill="#231F20" d="M179.806,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S178.369,573.683,179.806,573.683z M179.806,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S178.721,577.78,179.806,577.78z"/>
	<path fill="#231F20" d="M183.673,578.313h-0.757v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M186.943,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H186.943z"/>
	<path fill="#231F20" d="M190.734,573.812h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.98,1.876,2.246
		c0,1.076-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V573.812z M192.688,577.788
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C191.457,577.469,192.206,577.788,192.688,577.788z"/>
	<path fill="#231F20" d="M196.454,578.313h-0.758v-6.179h0.758V578.313z"/>
	<path fill="#231F20" d="M201.5,576.902c-0.026,0.215-0.232,0.852-0.801,1.23c-0.207,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H201.5z M200.777,575.706c-0.043-0.697-0.335-1.343-1.256-1.343
		c-0.697,0-1.248,0.646-1.248,1.343H200.777z"/>
	<path fill="#231F20" d="M202.5,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.256v2.591
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H202.5z M205.271,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M209.953,575.094c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.575h0.731c0.025,0.354,0.129,0.921,1.196,0.921c0.542,0,1.032-0.215,1.032-0.714
		c0-0.361-0.249-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H209.953z"/>
	<path fill="#231F20" d="M215.595,576.902c-0.026,0.215-0.232,0.852-0.801,1.23c-0.206,0.138-0.499,0.31-1.222,0.31
		c-1.266,0-2.014-0.955-2.014-2.255c0-1.394,0.671-2.504,2.159-2.504c1.3,0,1.937,1.032,1.937,2.625h-3.287
		c0,0.938,0.438,1.479,1.308,1.479c0.715,0,1.137-0.551,1.162-0.886H215.595z M214.872,575.706
		c-0.043-0.697-0.335-1.343-1.256-1.343c-0.697,0-1.248,0.646-1.248,1.343H214.872z"/>
	<path fill="#231F20" d="M221.76,575.327c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H221.76z"/>
	<path fill="#231F20" d="M225.226,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S223.789,573.683,225.226,573.683z M225.226,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S224.141,577.78,225.226,577.78z"/>
	<path fill="#231F20" d="M231.948,578.313h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M234.022,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H234.022z"/>
	<path fill="#231F20" d="M235.579,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.256v2.591
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H235.579z M238.35,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M243.247,575.327c-0.094-0.603-0.43-0.964-1.066-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.075-1.066h0.732c-0.078,0.714-0.517,1.721-1.834,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H243.247z"/>
	<path fill="#231F20" d="M245.818,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.491,0.061-0.568,0.061
		c-0.749,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H245.818z"/>
	<path fill="#231F20" d="M254.101,573.993c-0.232-1.084-1.136-1.282-1.729-1.282c-1.119,0-2.022,0.826-2.022,2.453
		c0,1.454,0.517,2.573,2.048,2.573c0.543,0,1.48-0.259,1.765-1.696h0.809c-0.344,2.333-2.237,2.436-2.702,2.436
		c-1.402,0-2.779-0.912-2.779-3.287c0-1.902,1.084-3.219,2.883-3.219c1.592,0,2.418,0.99,2.539,2.022H254.101z"/>
	<path fill="#231F20" d="M255.896,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.256v2.591
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H255.896z M258.667,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M263.638,578.313v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.379,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H263.638z"/>
	<path fill="#231F20" d="M268.138,575.094c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.575h0.731c0.025,0.354,0.129,0.921,1.196,0.921c0.542,0,1.032-0.215,1.032-0.714
		c0-0.361-0.249-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H268.138z"/>
	<path fill="#231F20" d="M269.966,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.256v2.591
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H269.966z M272.738,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M275.622,578.313h-0.758v-6.179h0.758V578.313z"/>
	<path fill="#231F20" d="M280.163,578.313h-0.835v-6.179h2.779c1.145,0,1.833,0.74,1.833,1.738c0,0.86-0.49,1.833-1.833,1.833
		h-1.944V578.313z M280.163,574.991h1.66c0.749,0,1.257-0.275,1.257-1.109c0-0.783-0.533-1.033-1.222-1.033h-1.695V574.991z"/>
	<path fill="#231F20" d="M285.668,578.313h-0.757v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M289.582,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S288.144,573.683,289.582,573.683z M289.582,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S288.497,577.78,289.582,577.78z"/>
	<path fill="#231F20" d="M296.401,578.313h-0.714v-0.62h-0.018c-0.327,0.611-0.843,0.749-1.282,0.749
		c-1.531,0-1.979-1.437-1.979-2.513c0-1.266,0.681-2.246,1.877-2.246c0.817,0,1.161,0.508,1.342,0.766l0.018-0.061v-2.254h0.757
		V578.313z M294.405,577.788c0.481,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.171,0-1.231,1.11-1.231,1.687
		C293.191,577.04,293.579,577.788,294.405,577.788z"/>
	<path fill="#231F20" d="M300.488,578.313v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.379,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H300.488z"/>
	<path fill="#231F20" d="M305.28,575.327c-0.094-0.603-0.43-0.964-1.066-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.075-1.066h0.732c-0.078,0.714-0.517,1.721-1.834,1.721
		c-1.265,0-2.014-0.955-2.014-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H305.28z"/>
	<path fill="#231F20" d="M307.85,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H307.85z"/>
	<path fill="#231F20" d="M309.428,572.995v-0.86h0.758v0.86H309.428z M310.186,578.313h-0.758v-4.501h0.758V578.313z"/>
	<path fill="#231F20" d="M313.254,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S311.817,573.683,313.254,573.683z M313.254,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S312.17,577.78,313.254,577.78z"/>
	<path fill="#231F20" d="M319.976,578.313h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M323.739,575.094c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.575h0.731c0.026,0.354,0.129,0.921,1.196,0.921c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.482-0.896-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H323.739z"/>
	<path fill="#231F20" d="M329.878,572.004c-0.731,1.377-1.17,2.203-1.17,4.269c0,1.42,0.49,2.471,1.179,3.821h-0.508
		c-0.895-1.343-1.48-2.418-1.48-4.027c0-1.515,0.525-2.736,1.455-4.063H329.878z"/>
	<path fill="#231F20" d="M330.875,572.995v-0.86h0.758v0.86H330.875z M331.633,578.313h-0.758v-4.501h0.758V578.313z"/>
	<path fill="#231F20" d="M336.507,578.313h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M338.602,574.44v3.873h-0.757v-3.873h-0.62v-0.628h0.62v-0.774c0-0.68,0.431-0.989,1.17-0.989
		c0.112,0,0.224,0.008,0.345,0.017v0.68c-0.095-0.009-0.215-0.017-0.31-0.017c-0.327,0-0.448,0.163-0.448,0.524v0.56h0.758v0.628
		H338.602z"/>
	<path fill="#231F20" d="M341.876,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S340.439,573.683,341.876,573.683z M341.876,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S340.792,577.78,341.876,577.78z"/>
	<path fill="#231F20" d="M350.002,573.528h0.585c-0.327,1.205-0.774,2.573-0.774,2.806c0,0.164,0.043,0.25,0.181,0.25
		c0.585,0,1.222-0.938,1.222-1.859c0-1.377-1.102-2.22-2.4-2.22c-1.532,0-2.616,1.23-2.616,2.754c0,1.522,1.17,2.685,2.676,2.685
		c0.801,0,1.602-0.379,2.092-0.981h0.576c-0.56,0.938-1.583,1.515-2.685,1.515c-1.842,0-3.288-1.437-3.288-3.27
		c0-1.799,1.472-3.236,3.245-3.236c1.592,0,2.96,1.119,2.96,2.686c0,1.558-1.291,2.486-2.057,2.486
		c-0.293,0-0.533-0.172-0.551-0.542l-0.018,0.009c-0.249,0.267-0.628,0.533-1.032,0.533c-0.74,0-1.266-0.628-1.266-1.377
		c0-1.161,0.792-2.375,1.988-2.375c0.413,0,0.766,0.189,0.99,0.672L350.002,573.528z M348.858,574.01c-0.766,0-1.3,0.964-1.3,1.678
		c0,0.491,0.293,0.835,0.723,0.835c0.731,0,1.282-1.033,1.282-1.721C349.563,574.38,349.237,574.01,348.858,574.01z"/>
	<path fill="#231F20" d="M356.463,575.327c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H356.463z"/>
	<path fill="#231F20" d="M358.077,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.67,0.104,1.67,1.256v2.591
		c0,0.189,0.094,0.267,0.249,0.267c0.069,0,0.163-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.775-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.627,0.731c-0.826,0-1.411-0.524-1.411-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H358.077z M360.848,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M365.819,578.313v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.378,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H365.819z"/>
	<path fill="#231F20" d="M370.32,575.094c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.575h0.731c0.025,0.354,0.129,0.921,1.196,0.921c0.542,0,1.032-0.215,1.032-0.714
		c0-0.361-0.249-0.482-0.895-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H370.32z"/>
	<path fill="#231F20" d="M372.148,575.18c0.035-1.075,0.749-1.497,1.859-1.497c0.361,0,1.669,0.104,1.669,1.256v2.591
		c0,0.189,0.095,0.267,0.25,0.267c0.069,0,0.164-0.017,0.25-0.034v0.551c-0.13,0.034-0.241,0.086-0.413,0.086
		c-0.672,0-0.774-0.344-0.801-0.688c-0.293,0.318-0.749,0.731-1.626,0.731c-0.826,0-1.412-0.524-1.412-1.3
		c0-0.378,0.112-1.256,1.368-1.411l1.248-0.154c0.181-0.018,0.396-0.087,0.396-0.534c0-0.473-0.345-0.731-1.059-0.731
		c-0.861,0-0.981,0.525-1.033,0.869H372.148z M374.919,576.015c-0.12,0.095-0.31,0.164-1.248,0.284
		c-0.37,0.052-0.964,0.163-0.964,0.731c0,0.49,0.25,0.783,0.817,0.783c0.706,0,1.395-0.456,1.395-1.059V576.015z"/>
	<path fill="#231F20" d="M377.802,578.313h-0.758v-6.179h0.758V578.313z"/>
	<path fill="#231F20" d="M378.978,573.812h0.715v0.637h0.017c0.181-0.258,0.525-0.766,1.343-0.766c1.196,0,1.876,0.98,1.876,2.246
		c0,1.076-0.447,2.513-1.979,2.513c-0.603,0-0.998-0.284-1.196-0.603h-0.017v2.255h-0.758V573.812z M380.932,577.788
		c0.826,0,1.213-0.748,1.213-1.738c0-0.576-0.061-1.687-1.23-1.687c-1.093,0-1.214,1.179-1.214,1.91
		C379.701,577.469,380.45,577.788,380.932,577.788z"/>
	<path fill="#231F20" d="M384.735,578.313h-0.757v-4.501h0.714v0.749h0.018c0.301-0.525,0.696-0.878,1.265-0.878
		c0.095,0,0.138,0.009,0.198,0.025v0.783h-0.284c-0.706,0-1.153,0.551-1.153,1.205V578.313z"/>
	<path fill="#231F20" d="M388.648,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S387.21,573.683,388.648,573.683z M388.648,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S387.563,577.78,388.648,577.78z"/>
	<path fill="#231F20" d="M395.466,578.313h-0.714v-0.62h-0.018c-0.326,0.611-0.843,0.749-1.282,0.749
		c-1.531,0-1.979-1.437-1.979-2.513c0-1.266,0.681-2.246,1.877-2.246c0.817,0,1.161,0.508,1.342,0.766l0.018-0.061v-2.254h0.757
		V578.313z M393.47,577.788c0.481,0,1.23-0.318,1.23-1.515c0-0.731-0.12-1.91-1.213-1.91c-1.171,0-1.231,1.11-1.231,1.687
		C392.256,577.04,392.644,577.788,393.47,577.788z"/>
	<path fill="#231F20" d="M399.554,578.313v-0.654l-0.018-0.017c-0.318,0.524-0.705,0.8-1.429,0.8c-0.662,0-1.463-0.318-1.463-1.394
		v-3.236h0.758v2.986c0,0.74,0.379,0.989,0.869,0.989c0.955,0,1.239-0.843,1.239-1.497v-2.479h0.757v4.501H399.554z"/>
	<path fill="#231F20" d="M404.346,575.327c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H404.346z"/>
	<path fill="#231F20" d="M406.917,574.44v2.927c0,0.353,0.301,0.353,0.456,0.353h0.267v0.594c-0.275,0.025-0.49,0.061-0.568,0.061
		c-0.748,0-0.912-0.422-0.912-0.964v-2.97h-0.611v-0.628h0.611v-1.257h0.758v1.257h0.723v0.628H406.917z"/>
	<path fill="#231F20" d="M408.495,572.995v-0.86h0.758v0.86H408.495z M409.252,578.313h-0.758v-4.501h0.758V578.313z"/>
	<path fill="#231F20" d="M412.321,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S410.883,573.683,412.321,573.683z M412.321,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		s-1.309,1.118-1.309,1.721S411.237,577.78,412.321,577.78z"/>
	<path fill="#231F20" d="M419.042,578.313h-0.758v-2.771c0-0.783-0.224-1.179-0.964-1.179c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757v-4.501h0.714v0.637h0.018c0.163-0.241,0.585-0.766,1.359-0.766c0.697,0,1.575,0.284,1.575,1.566V578.313z"/>
	<path fill="#231F20" d="M422.805,575.094c-0.009-0.284-0.111-0.757-1.067-0.757c-0.232,0-0.895,0.077-0.895,0.637
		c0,0.37,0.232,0.456,0.817,0.603l0.758,0.188c0.938,0.232,1.265,0.577,1.265,1.188c0,0.929-0.766,1.488-1.781,1.488
		c-1.781,0-1.911-1.032-1.937-1.575h0.731c0.026,0.354,0.129,0.921,1.196,0.921c0.542,0,1.033-0.215,1.033-0.714
		c0-0.361-0.25-0.482-0.896-0.646l-0.878-0.215c-0.628-0.155-1.041-0.474-1.041-1.093c0-0.99,0.817-1.438,1.704-1.438
		c1.609,0,1.721,1.188,1.721,1.411H422.805z"/>
	<path fill="#231F20" d="M425.669,578.313h-0.895v-0.912h0.895V578.313z"/>
	<path fill="#231F20" d="M429.756,575.327c-0.095-0.603-0.431-0.964-1.067-0.964c-0.938,0-1.239,0.896-1.239,1.695
		c0,0.774,0.189,1.729,1.23,1.729c0.508,0,0.946-0.378,1.076-1.066h0.731c-0.078,0.714-0.517,1.721-1.833,1.721
		c-1.266,0-2.015-0.955-2.015-2.255c0-1.394,0.672-2.504,2.16-2.504c1.18,0,1.627,0.86,1.688,1.644H429.756z"/>
	<path fill="#231F20" d="M433.222,573.683c1.438,0,2.091,1.213,2.091,2.375s-0.653,2.375-2.091,2.375s-2.092-1.213-2.092-2.375
		S431.785,573.683,433.222,573.683z M433.222,577.78c1.084,0,1.308-1.119,1.308-1.722s-0.224-1.721-1.308-1.721
		c-1.085,0-1.309,1.118-1.309,1.721S432.137,577.78,433.222,577.78z"/>
	<path fill="#231F20" d="M436.278,573.812h0.714v0.637h0.018c0.163-0.241,0.551-0.766,1.359-0.766c0.81,0,1.067,0.49,1.196,0.731
		c0.379-0.422,0.68-0.731,1.377-0.731c0.482,0,1.395,0.249,1.395,1.515v3.115h-0.758v-2.909c0-0.619-0.189-1.041-0.834-1.041
		c-0.638,0-1.059,0.603-1.059,1.205v2.745h-0.758v-3.115c0-0.379-0.146-0.835-0.706-0.835c-0.43,0-1.188,0.275-1.188,1.497v2.453
		h-0.757V573.812z"/>
	<path fill="#231F20" d="M443.333,580.094c0.732-1.377,1.171-2.203,1.171-4.269c0-1.42-0.49-2.47-1.179-3.821h0.508
		c0.895,1.343,1.479,2.419,1.479,4.028c0,1.515-0.524,2.736-1.454,4.062H443.333z"/>
	<path fill="#231F20" d="M447.75,578.313h-0.895v-0.912h0.895V578.313z"/>
</g>
</svg>
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 width="100%" height="100%" viewBox="0 0 52.866 51.739" enable-background="new 0 0 52.866 51.739" xml:space="preserve">
<polygon fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" points="44.415,3.704 14.633,3.704 14.633,51.739 52.866,51.739 
	52.866,11.997 44.415,3.704 44.415,3.704 "/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E20917" d="M42.481,1.85c0,2.811,0,5.655,0,6.226c0.576,0,3.471,0,6.308,0
	L42.481,1.85L42.481,1.85L42.481,1.85z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#F5F5F5" d="M41.175,1.307c-10.689,0-27.428,0-28.284,0
	c0,1.255,0,46.237,0,47.492c1.24,0,35.794,0,37.034,0c0-0.935,0-26.096,0-39.417h-8.75V1.307L41.175,1.307L41.175,1.307z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M42.481,1.85l6.308,6.226c-2.837,0-5.731,0-6.308,0
	C42.481,7.505,42.481,4.66,42.481,1.85L42.481,1.85L42.481,1.85z M49.925,48.799c-1.24,0-35.794,0-37.034,0
	c0-1.255,0-46.236,0-47.492c0.856,0,17.595,0,28.284,0v8.075h8.75C49.925,22.703,49.925,47.864,49.925,48.799L49.925,48.799
	L49.925,48.799L49.925,48.799z M11.583,0v50.105h39.649V8.65L42.467,0H11.583L11.583,0L11.583,0L11.583,0z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" d="M39.015,19.902V5.337H12.891c0,3.47,0,8.805,0,14.565H39.015
	L39.015,19.902L39.015,19.902z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E20917" d="M1.307,16.936c1.238,0,33.62,0,34.857,0c0-1.12,0-10.861,0-11.981
	c-1.237,0-33.619,0-34.857,0C1.307,6.075,1.307,15.816,1.307,16.936L1.307,16.936L1.307,16.936z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M36.164,16.936c-1.237,0-33.619,0-34.857,0
	c0-1.12,0-10.861,0-11.981c1.238,0,33.62,0,34.857,0C36.164,6.075,36.164,15.816,36.164,16.936L36.164,16.936L36.164,16.936z
	 M0,3.647v14.596h37.471V3.647h-0.653H0L0,3.647L0,3.647L0,3.647z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" d="M9.905,8.311v2.267h0.881c0.635,0,1.059-0.042,1.272-0.125
	c0.214-0.083,0.382-0.214,0.503-0.392c0.122-0.178,0.183-0.385,0.183-0.621c0-0.291-0.086-0.53-0.256-0.72
	c-0.17-0.188-0.386-0.307-0.647-0.354c-0.191-0.037-0.578-0.055-1.158-0.055H9.905L9.905,8.311L9.905,8.311z M8.292,14.928V6.963
	h2.583c0.979,0,1.616,0.04,1.914,0.12c0.456,0.12,0.839,0.38,1.146,0.78c0.309,0.401,0.463,0.918,0.463,1.552
	c0,0.49-0.089,0.901-0.267,1.234c-0.177,0.333-0.402,0.595-0.676,0.786c-0.273,0.19-0.552,0.316-0.834,0.377
	c-0.385,0.077-0.94,0.114-1.668,0.114H9.905v3.002H8.292L8.292,14.928L8.292,14.928L8.292,14.928z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" d="M17.315,8.311v5.27h1.195c0.447,0,0.77-0.025,0.968-0.076
	c0.26-0.065,0.475-0.175,0.646-0.331c0.171-0.156,0.311-0.412,0.419-0.769c0.107-0.356,0.162-0.842,0.162-1.457
	s-0.055-1.087-0.162-1.416c-0.108-0.33-0.26-0.586-0.454-0.771c-0.195-0.185-0.441-0.31-0.741-0.375
	c-0.224-0.05-0.661-0.076-1.313-0.076H17.315L17.315,8.311L17.315,8.311z M15.702,6.963h2.931c0.661,0,1.165,0.05,1.512,0.152
	c0.467,0.138,0.865,0.382,1.197,0.733c0.332,0.352,0.585,0.782,0.759,1.29c0.173,0.509,0.26,1.137,0.26,1.883
	c0,0.656-0.081,1.221-0.244,1.695c-0.198,0.58-0.481,1.049-0.851,1.408c-0.277,0.271-0.653,0.483-1.126,0.635
	c-0.354,0.113-0.827,0.169-1.42,0.169h-3.018V6.963L15.702,6.963L15.702,6.963L15.702,6.963z"/>
<polygon fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" points="23.727,14.928 23.727,6.963 29.18,6.963 29.18,8.311 
	25.34,8.311 25.34,10.19 28.648,10.19 28.648,11.538 25.34,11.538 25.34,14.928 23.727,14.928 23.727,14.928 "/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M25.983,35.519c-2.812,2.1-4.745,5.082-3.982,5.547l-0.666-0.335
	C20.948,40.259,21.825,37.729,25.983,35.519L25.983,35.519L25.983,35.519L25.983,35.519L25.983,35.519z"/>
<path fill="none" stroke="#E30921" stroke-width="0.5197" stroke-miterlimit="2.6131" d="M25.983,35.519
	c-2.812,2.1-4.745,5.082-3.982,5.547l-0.666-0.335C20.948,40.259,21.825,37.729,25.983,35.519L25.983,35.519L25.983,35.519
	L25.983,35.519L25.983,35.519z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M47.193,34.952l-0.023-0.368c-0.004-0.047,0-0.105-0.002-0.166
	h-0.005c-0.015,0.057-0.033,0.122-0.052,0.175l-0.127,0.351h-0.071l-0.124-0.36c-0.015-0.049-0.029-0.108-0.044-0.166H46.74
	c-0.001,0.058,0,0.11-0.003,0.166l-0.022,0.368h-0.089l0.047-0.61h0.121l0.119,0.331c0.016,0.046,0.028,0.097,0.043,0.153h0.003
	c0.014-0.056,0.028-0.11,0.043-0.155l0.12-0.329h0.119l0.046,0.61H47.193L47.193,34.952L47.193,34.952L47.193,34.952L47.193,34.952z
	 M46.604,34.342v0.078h-0.187v0.532h-0.091V34.42h-0.186v-0.078H46.604L46.604,34.342L46.604,34.342L46.604,34.342L46.604,34.342
	L46.604,34.342L46.604,34.342z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M28.566,34.342c0.568-0.985,1.172-2.088,1.814-3.325
	c1.3-2.505,2.067-4.538,2.526-6.316c0.834,2.165,2.059,4.225,3.702,5.639c0.511,0.44,1.075,0.845,1.667,1.215
	C35.172,32.035,31.688,32.963,28.566,34.342L28.566,34.342L28.566,34.342z M47.102,33.165c0.821-1.749-2.684-2.349-7.452-1.796
	c-0.838-0.472-1.652-1.007-2.389-1.593c-1.836-1.507-3.187-4.034-4.027-6.566c0.383-2.121,0.359-3.924,0.401-5.872
	c-0.182,0.888-0.312,2.372-0.811,4.482c-0.643-2.466-0.783-4.757-0.394-5.904c0.086-0.251,0.293-0.545,0.385-0.61
	c0.358,0.179,0.792,0.619,0.889,1.541c0.323-1.702-0.509-1.642-0.742-1.642l-0.523-0.004c-0.29,0-0.551,0.232-0.677,0.705
	c-0.431,1.605-0.225,4.505,0.669,7.419c-0.556,1.942-1.416,4.301-2.806,7.101c-3.741,7.533-6.472,11.047-8.29,10.306l0.649,0.333
	c1.21,0.617,3.286-1.02,6.551-6.667c3.069-1.107,7.154-1.921,10.714-2.278c3.505,1.878,7.53,2.523,7.734,1.313
	c-0.907,0.436-3.514-0.17-6.149-1.445C44.442,31.758,47.17,32.083,47.102,33.165L47.102,33.165L47.102,33.165L47.102,33.165z"/>
<path fill="none" stroke="#E30921" stroke-width="0.5197" stroke-miterlimit="2.6131" d="M28.566,34.342
	c0.568-0.985,1.172-2.088,1.814-3.325c1.3-2.505,2.067-4.538,2.526-6.316c0.834,2.165,2.059,4.225,3.702,5.639
	c0.511,0.44,1.075,0.845,1.667,1.215C35.172,32.035,31.688,32.963,28.566,34.342L28.566,34.342L28.566,34.342z M47.102,33.165
	c0.821-1.749-2.684-2.349-7.452-1.796c-0.838-0.472-1.652-1.007-2.389-1.593c-1.836-1.507-3.187-4.034-4.027-6.566
	c0.383-2.121,0.359-3.924,0.401-5.872c-0.182,0.888-0.312,2.372-0.811,4.482c-0.643-2.466-0.783-4.757-0.394-5.904
	c0.086-0.251,0.293-0.545,0.385-0.61c0.358,0.179,0.792,0.619,0.889,1.541c0.323-1.702-0.509-1.642-0.742-1.642l-0.523-0.004
	c-0.29,0-0.551,0.232-0.677,0.705c-0.431,1.605-0.225,4.505,0.669,7.419c-0.556,1.942-1.416,4.301-2.806,7.101
	c-3.741,7.533-6.472,11.047-8.29,10.306l0.649,0.333c1.21,0.617,3.286-1.02,6.551-6.667c3.069-1.107,7.154-1.921,10.714-2.278
	c3.505,1.878,7.53,2.523,7.734,1.313c-0.907,0.436-3.514-0.17-6.149-1.445C44.442,31.758,47.17,32.083,47.102,33.165"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M19.435,42.372l-0.528-2.746c-0.148-0.775-0.302-1.79-0.431-2.613
	h-0.053c-0.129,0.834-0.298,1.882-0.446,2.623l-0.542,2.736H19.435L19.435,42.372L19.435,42.372L19.435,42.372L19.435,42.372z
	 M17.233,43.649l-0.675,3.17h-1.566l2.582-11.478h1.856l2.442,11.478h-1.585l-0.667-3.17H17.233L17.233,43.649L17.233,43.649
	L17.233,43.649L17.233,43.649L17.233,43.649L17.233,43.649z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M26.4,41.704c0-0.164,0-0.352-0.025-0.524
	c-0.076-0.741-0.504-1.392-1.079-1.392c-0.985,0-1.331,1.391-1.331,2.936c0,1.689,0.442,2.89,1.275,2.89
	c0.367,0,0.846-0.192,1.103-1.175c0.041-0.146,0.058-0.334,0.058-0.539V41.704L26.4,41.704L26.4,41.704L26.4,41.704L26.4,41.704z
	 M28.008,35.036v9.649c0,0.631,0.043,1.56,0.067,2.135h-1.387l-0.1-1.004h-0.053c-0.277,0.586-0.894,1.14-1.728,1.14
	c-1.521,0-2.463-1.661-2.463-4.243c0-2.914,1.239-4.297,2.549-4.297c0.653,0,1.183,0.307,1.472,0.93H26.4v-4.309H28.008
	L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M31.874,45.692c0.992,0,1.207-1.75,1.207-3.016
	c0-1.225-0.215-3-1.242-3c-1.047,0-1.255,1.775-1.255,3c0,1.383,0.239,3.016,1.272,3.016H31.874L31.874,45.692L31.874,45.692
	L31.874,45.692L31.874,45.692z M31.831,46.955c-1.647,0-2.849-1.423-2.849-4.255c0-2.998,1.422-4.285,2.92-4.285
	c1.632,0,2.814,1.469,2.814,4.254c0,3.282-1.626,4.286-2.869,4.286H31.831L31.831,46.955L31.831,46.955L31.831,46.955L31.831,46.955
	L31.831,46.955L31.831,46.955z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M37.293,43.887c0,0.194,0.024,0.38,0.066,0.519
	c0.264,1.01,0.743,1.208,1.073,1.208c0.951,0,1.305-1.263,1.305-2.96c0-1.582-0.371-2.865-1.323-2.865
	c-0.521,0-0.955,0.625-1.064,1.235c-0.032,0.165-0.057,0.376-0.057,0.548V43.887L37.293,43.887L37.293,43.887L37.293,43.887
	L37.293,43.887z M35.686,35.036h1.607v4.444h0.034c0.419-0.75,1.005-1.064,1.737-1.064c1.397,0,2.291,1.59,2.291,4.135
	c0,2.959-1.206,4.405-2.571,4.405c-0.815,0-1.27-0.433-1.635-1.183h-0.053l-0.101,1.047h-1.379c0.025-0.56,0.068-1.504,0.068-2.135
	V35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M45.958,41.853c0.019-1.456-0.493-2.223-1.129-2.223
	c-0.819,0-1.203,1.188-1.249,2.223H45.958L45.958,41.853L45.958,41.853L45.958,41.853L45.958,41.853z M43.571,43.017
	c0.016,2.119,0.928,2.635,1.887,2.635c0.591,0,1.088-0.138,1.439-0.301l0.24,1.17c-0.494,0.248-1.256,0.393-1.973,0.393
	c-2.073,0-3.172-1.575-3.172-4.123c0-2.715,1.246-4.384,2.963-4.384c1.721,0,2.52,1.653,2.52,3.731c0,0.414-0.016,0.67-0.04,0.887
	L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" d="M49.925,10.912c0-0.524,0-1.036,0-1.529h-7.589v1.529H49.925
	L49.925,10.912L49.925,10.912z"/>
</svg>
\rules except wikilink

<$button class="cpfadeable">{{$:/core/images/preview-open}}&nbsp;MultiMedia</$button>
<$button tooltip="View the next paper">
Next {{$:/core/images/right-arrow}}
<$action-navigate $to={{!!next_paper}}/>
</$button>
<$button tooltip="View the next session">
Next {{$:/core/images/right-arrow}}
<$action-navigate $to={{!!next_session_title}}/>
</$button>
@@.cppinktext ''NO&nbsp;PDF''@@
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<$button tooltip="View the preceding paper">
{{$:/core/images/left-arrow}} Prev
<$action-navigate $to={{!!prev_paper}}/>
</$button>
<$button tooltip="View the preceding session">
{{$:/core/images/left-arrow}} Prev
<$action-navigate $to={{!!prev_session_title}}/>
</$button>
!!!Count of Tiddlers with subscript/superscript in title: <$count filter="[regexp[,,]] [regexp[\^\^]]"/>

List of Tiddlers with subscript/superscript in title:

<$list filter="[regexp[,,]] [regexp[\^\^]]"/>

!!!Count of Missing Tiddlers: <$count filter="[all[missing]sort[title]]"/>

List of Missing Tiddlers:

<$list filter="[all[missing]sort[title]]"/>

\rules except wikilink
Proceedings of the 21st Annual Conference of the International Speech Communication Association (INTERSPEECH 2020). ISSN 2308-457X. © 2020 International Speech Communication Association. All rights reserved.  For technical support please contact Causal Productions (info@causalproductions.com).
{{Session List}}
<$button tooltip="View the Session List">
{{$:/core/images/up-arrow}} Sessions
<$action-navigate $to="Session List"/>
</$button>
/*
 * CONFERENCE Paper abstract card
 */

.cpabstractcardauthorheading { font-size:1em; }

/* the following style is for the <div> that contains the author names (maybe multiline) and the
affiliation names (maybe multiline). The 0.75em bottom margin spaces it a bit from the button row that follows. */
.cpabstractcardauthorarea { font-size:1em; line-height:1.15; margin-top:0.5em; margin-bottom:0.75em; }
/* the following style is for the <p> that contains the author names only */
p.cpabstractcardauthornames { font-style:normal; margin-top:0em; margin-bottom:0em; }
/* the following style is for the <p> that contains the affiliations only, the 0.25em separates it from the author names */
p.cpabstractcardaffiliationlist { font-style:italic; margin-top:0.25em; margin-bottom:0em; }
/* the abstract paragraph is the last thing on the tiddler so make the p bottom margin zero */
.cpabstractcardabstract { font-size:1em; line-height:1.15; }
.cpabstractcardabstract > p { margin-top:0.75em; margin-bottom:0em; }

/* the following style is for the <p> that contains the buttons in a single row.  The 0.5em margins keep the rows close together. */
.cpbuttonrow > p { margin-top:0.5em; margin-bottom:0.5em; }

/* the following styles are for the VIEW PDF button, which might have a MULTIMEDIA button next to it.
A separate <p> style "lineheightforbuttons" is needed to avoid extra vertical space due to line-height, and the <span>
is needed to keep the hover area confined to the buttons rather than the full width.  The hover
tooltip is vertically sized by the line-height of the span. */
.lineheightforbuttons { line-height:1em; }
.cpabscardpdfandmediabutton { display:inline-flex;align-items:flex-start;line-height:1.5em; }
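/* a minimal markup sketch (assumed nesting, not copied verbatim from the abstract card template) of how
   these classes are expected to combine; the pdf icon transclusion and the cpfadeable MultiMedia button
   are the ones used elsewhere in this file:
     <p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
       <a href={{!!pdf_file_full_name}} class="externallinkbutton" target="_blank">{{$:/causal/pdf icon in abscard view}}</a>
       <$button class="cpfadeable">{{$:/core/images/preview-open}}&nbsp;MultiMedia</$button>
     </span></p>
*/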
.cpaidxlinkrowstyle { width:30px;text-align:left;padding-left:0;margin-left:0; }
/* the following style is based on the normal table top margin of 1em, with margin-top
reduced to 0.5em because the link row table is borderless so it needs to be moved a bit
closer to surrounding elements. The bottom margin is zero because that is the end of the tiddler. */
.cpaidxlinkrowtable { margin-top:0.5em; margin-bottom:0em; }
.cpaidxlinkrowtable td { padding-left:0em; padding-right:1em; }
/*
 * CONFERENCE Author Index List tiddler styles
 */

/* the author list is a borderless table so reduce margin-top to 0.5em to make the vertical whitespace appear
consistent with bordered tables.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpauthorindexlisttable { margin-top:0.5em; margin-bottom:0em; }
/* the next line ensures all td elements within a .cpauthorindexlisttable have zero left-right padding,
and the font and line-height definitions are included here to avoid adding more structure elements */
.cpauthorindexlisttable td { padding-left:0em; padding-right:0em; font-size:1em; line-height:1.5; }
/*
 * CONFERENCE Author Index Person Card
 */

/* the following style is for the author paper table.  The 1em top margin matches our other
bordered tables, and the 0em bottom margin is used because this table ends the tiddler */
.cpaidxauthortable { margin-top:1em; margin-bottom:0em; }

/* the following styles are used within the table */
.cpauthorindexpersoncardauthorname { font-size:1em; font-weight:bold; }
.cpauthorindexpersoncardconferencename { font-size:1em; font-weight:bold; }
.cpauthorindexpersoncardpapercode { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpauthorindexpersoncardpapertitle { font-size:1em; line-height:1.15; }
/*
 * Global change to TIDDLYWIKI built-in styles
 */

/* make the titlebar smaller.  This affects the tiddler title, and the 3 control buttons on top right
 */
.tc-titlebar { font-size:1.2em; }

/* the margin-bottom spec in the next class allows vertical space between tiddler title and body to close
 */
.tc-titlebar h2 { font-weight: bold; margin-bottom:0.5em; }

/* the tiddler body begins with a <p> so the top margin contributes to the space between title and body.
The following selector selects the first child <p> of the tiddler-body and sets the top/bottom margin to
a minimum value, which can be extended in cases such as the abstract card author list.
 */
.tc-tiddler-body > p { margin-top:0.5em; margin-bottom:0.5em; }

/* the following makes the tags wrapper disappear, allowing the vertical space between tiddler title and
tiddler body to close.
 */
.tc-tags-wrapper { display: none; }

\rules except wikilink
.cpwelcomepagespaceaboveiconwithconferencename { padding-top:0.75em; }
.cpwelcomepagespaceaboveiconwithoutconferencename { padding-top:0.0em; }

/* the following styles force the conference logos to lose their descender padding due
to the line-height of the parent */
.cpwelcomepagespaceaboveiconwithconferencename > img { display:block; }
.cpwelcomepagespaceaboveiconwithoutconferencename > img { display:block; }

.icon_size_on_welcome_page { width:250px; }
/* the confinfo page table is borderless so adjust the top margin to keep it visually consistent
with other tiddlers.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpconfinfotable { margin-top:1em; margin-bottom:0em; }
.cpconfinfotable td { padding-left:0em; padding-bottom:0.5em; }
.cpconfinfotable tr:last-child td { padding-bottom:0em; }
/* the following style is used for <a> elements surrounding buttons, to ensure that
the text inside the button does not cause a stray underline to appear between
buttons on the same line, and to force the text colour to near-black instead of the normal link blue.
Note that the TW text colour is not pure black but rgb(51,51,51). */
a.externallinkbutton { color: rgb(51,51,51); text-decoration: none; } 

/* the following reveals and styles allow buttons and table cells with class
"cpfadeable" to be faded when turned off.  Specifically, PDF and MEDIA link
buttons can be switched off, resulting in links that are not clickable (they can still
be reached by tabbing and activated with Enter, but we ignore this) and that appear faded */

<$reveal type="match" state="$:/causal/config/hidePDFandMEDIA" text="hide">
a.externallinkbutton {
pointer-events: none;
cursor: default;
}
.cpfadeable {
opacity: 0.33;
}
.cpabscardpdfandmediabutton:hover::after, .cpaidxauthortable td:first-child:hover::after, .cpconfinfotable td:first-child:hover::after, .cpsessionviewtable td:first-child:hover::after {
display: inline;
position: absolute;
border: 1px solid #ccc;
border-radius: 4px;
box-shadow: 1px 1px 4px #000;
background-color: #fff;
margin-left: 5px;
margin-top: -25px;
padding: 3px;
opacity: 1;
}
.cpabscardpdfandmediabutton::after, .cpaidxauthortable td:first-child::after, .cpconfinfotable td:first-child::after, .cpsessionviewtable td:first-child::after {
content: "PDF+MEDIA files are only available in the final proceedings";
opacity: 1;
}
.cpabscardpdfandmediabutton::after, .cpaidxauthortable td:first-child::after, .cpconfinfotable td:first-child::after, .cpsessionviewtable td:first-child::after {
display: none;
}
</$reveal>
<$reveal type="match" state="$:/causal/config/hidePDFandMEDIA" text="show">
.cpfadeable {
opacity: 1;
}
</$reveal>
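/* a sketch (assumed wiring, matching the state tiddler used by the two reveals above) of how the
   fade could be toggled from a button elsewhere in the wiki:
     <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="show"/>Enable PDF+MEDIA links</$button>
     <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="hide"/>Disable PDF+MEDIA links</$button>
*/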
.cpconferencedisambiguator { font-size:1.12em; font-weight:bold; }
.cpprevnextanchortext { font-size:1.12em; font-weight:bold; }
.cpredtext { color:red; }
.cppinktext { color:#FFB0B0; }
.cpcenter { text-align:center; }
.cpmailingaddress { padding-left:2em; }

.cptightlineheight { line-height:1.15; }
.cpemabovezerobelow { margin-top:1em; margin-bottom:0em; }

.cpcopyrightpage { line-height:1.15; margin-top:0.75em; margin-bottom:0em; }
.cpsupportpage   { line-height:1.15; margin-top:0.75em; margin-bottom:0em; }
.cpsupportpagetable { margin-top:1em; margin-bottom:0em; }

/* the following makes the cpicon image a block element; otherwise the icon
gets a descender gap below it caused by the line-height and font style of the parent */
.cpicon > img { display: block; }

.cpw25px > img { width:25px; }

/* the following is used in the session view to force a minimum width for the pdf icon column, using @@ ... @@ syntax */
.pdficonintable { display:block;width:30px; }
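/* usage sketch (assumed cell content, reusing the pdf icon transclusion seen elsewhere in this file):
     @@.pdficonintable {{$:/causal/images/pdficon02}}@@
   the @@ ... @@ span carries the class, and display:block plus the 30px width keep the icon column
   from collapsing */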
/*
 * CONFERENCE Session List tiddler styles
 */

/* the session list is a borderless table so reduce the margin-top to 0.5em to make it consistent
with bordered tables.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpsessionlisttable { margin-top:0.5em; margin-bottom:0em; }
/* the next line ensures all td elements within a .cpsessionlisttable have zero left padding and only 0.5em right padding */
.cpsessionlisttable td { padding-left:0em; padding-right:0.5em; }

/* note that in the session list table, the vertical alignment of table cells must be done
using TW5 wikitext markup and not CSS.  Properties such as display:flex and align-content:flex-start do not seem to take effect here. */
.cpsessionlistsessioncode { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpsessionlistsessionname { font-size:1em; line-height:1.15; }
/*
 * CONFERENCE Session View tiddler styles
 */

/* the following style adds a bit of space above and below table row to separate cell text from rulers */
table.cpsessionviewtable { margin-top:0.75em; margin-bottom:0em; }

/* the following styles are for entries within the session view table */
.cpsessionviewpapercode  { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpsessionviewpapertitle { font-size:1em; line-height:1.15; }
.cpsessionviewpaperauthor { font-size:1em;font-style:italic;line-height:1.15; }

.cpsessionviewmetadata { font-size:1em; line-height:1.15; }
.cpsessionviewmetadata table { margin-top:0.6em; margin-bottom:0.75em; }
.cpsessionviewmetadata tr:first-child td:first-child { padding-bottom:0.2em; } /* make the padding 0.2em on the bottom of top left cell, to space this row a bit more from subsequent rows */
.cpsessionviewmetadata td { padding-left:0px; padding-right:0px; }
.cpsessionviewmetadata td:first-child { width:1px; white-space: nowrap; } /* ensure that 'chairs:' column is just wide enough for the word */
/* the following class is used to make borderless tables */
.cpborderless,
.cpborderless table,
.cpborderless td,
.cpborderless tr,
.cpborderless th,
.cpborderless tbody { border:0 !important; }

/* the following class essentially defines the visual appearance of H2 headers, for use
in tables where tiddler !! syntax does not work.  For all header style definitions see w3schools
or t287/00_gv.txt */
.cph2 { display: block; font-size: 1.5em; margin-top: 0.83em; margin-bottom: 0.83em; margin-left: 0; margin-right: 0; font-weight: bold; }
.cph3 { display: block; font-size: 1.0em; margin-top: 0.83em; margin-bottom: 0.83em; margin-left: 0; margin-right: 0; font-weight: bold; }

/* the following allows tables to have extra space between content and row divider rules */
.cptablecelltopbottomspace1 td { padding-top:0.1em; padding-bottom:0.1em; }
.cptablecelltopbottomspace2 td { padding-top:0.2em; padding-bottom:0.2em; }
.cptablecelltopbottomspace3 td { padding-top:0.3em; padding-bottom:0.3em; }
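/* usage sketch (assumed wikitext; TW5 assigns table classes via a trailing |...|k row):
     |cell A |cell B |
     |cptablecelltopbottomspace2|k
   which adds 0.2em of top and bottom padding inside each cell of that table */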
/*
 * Welcome Page tiddler styles
 */
/* width of svg logo for the whole publication */
.cppublicationsvg { width:TODO_publication_welcomeartwork_displaywidth; }
.cppublicationname { font-weight:bold;font-size:1.3em; }
.cppublicationdatevenue {
font-size:1.1em;
display:flex;
justify-content:space-between;
}

/* each individual conference in the publication is named in the following style */
.cpwelcomepageconferencename { font-weight:bold;line-height:1.2; }

/* the following style is for the publication header which is a table with icon in left cell
 and conference name and date/venue in right cell.  We need to have a small top margin to separate
 from the tiddler title.
*/
.cpwelcomepagepublicationtable,
.cpwelcomepagepublicationtable td { margin-top:1em; margin-bottom:0px; padding-top:0px; padding-bottom:0px; }

/* the following style is for a table which contains a per-conference row with icon in left cell, and major
headings in right cell such as preface, session list, author index.  We want all margins to be zero so it
can butt up to its vertical neighbours efficiently.
*/
.cpwelcomepageconferencetable,
.cpwelcomepageconferencetable td { margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px; }

/* the copyright message is displayed in tiny font on the welcome page.  To make it readable the user can click on the COPYRIGHT STATEMENT heading to see the text in a readable tiddler */
.cpwelcomepagecopyright { display: block; font-size: 0.5em; margin-top: 0.1em; margin-bottom: 0.1em; margin-left: 0; margin-right: 0; font-weight: bold; line-height:1.5em; }

/* the following style is applied to the conference information, session list, and author index links.
TW mandates that the links be blue, and not bold, so specifying these in the following style will have
no effect.  We can control font size, italic, and other parameters which will work correctly. */
.cpwelcomepageconferencelinks {}
\rules except wikilink

<$button>{{$:/core/images/preview-open}}&nbsp;View&nbsp;Folder</$button>
\rules except wikilink

<$checkbox tiddler="$:/state/causal" field="view multimedia list" checked="yes" unchecked="no" default="no"> View MultiMedia list</$checkbox>
<a href={{!!pdf_file_full_name}} class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in abscard view}}</a>
<$button tooltip="View the top level Welcome Page">
{{$:/core/images/up-arrow}} Welcome
<$action-navigate $to="Welcome Page"/>
</$button>
\rules except wikilink

<$button class="cpfadeable">{{$:/core/images/preview-open}}&nbsp;Accompanying ZIP</$button>
hide
show
hide
hide
hide
hide
{
    "tiddlers": {
        "$:/Acknowledgements": {
            "title": "$:/Acknowledgements",
            "type": "text/vnd.tiddlywiki",
            "text": "TiddlyWiki incorporates code from these fine OpenSource projects:\n\n* [[The Stanford Javascript Crypto Library|http://bitwiseshiftleft.github.io/sjcl/]]\n* [[The Jasmine JavaScript Test Framework|http://pivotal.github.io/jasmine/]]\n* [[Normalize.css by Nicolas Gallagher|http://necolas.github.io/normalize.css/]]\n\nAnd media from these projects:\n\n* World flag icons from [[Wikipedia|http://commons.wikimedia.org/wiki/Category:SVG_flags_by_country]]\n"
        },
        "$:/core/copyright.txt": {
            "title": "$:/core/copyright.txt",
            "type": "text/plain",
            "text": "TiddlyWiki created by Jeremy Ruston, (jeremy [at] jermolene [dot] com)\n\nCopyright © Jeremy Ruston 2004-2007\nCopyright © UnaMesa Association 2007-2016\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this\nlist of conditions and the following disclaimer in the documentation and/or other\nmaterials provided with the distribution.\n\nNeither the name of the UnaMesa Association nor the names of its contributors may be\nused to endorse or promote products derived from this software without specific\nprior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT\nSHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\nANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"
        },
        "$:/core/icon": {
            "title": "$:/core/icon",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\"><path d=\"M64 0l54.56 32v64L64 128 9.44 96V32L64 0zm21.127 95.408c-3.578-.103-5.15-.094-6.974-3.152l-1.42.042c-1.653-.075-.964-.04-2.067-.097-1.844-.07-1.548-1.86-1.873-2.8-.52-3.202.687-6.43.65-9.632-.014-1.14-1.593-5.17-2.157-6.61-1.768.34-3.546.406-5.34.497-4.134-.01-8.24-.527-12.317-1.183-.8 3.35-3.16 8.036-1.21 11.44 2.37 3.52 4.03 4.495 6.61 4.707 2.572.212 3.16 3.18 2.53 4.242-.55.73-1.52.864-2.346 1.04l-1.65.08c-1.296-.046-2.455-.404-3.61-.955-1.93-1.097-3.925-3.383-5.406-5.024.345.658.55 1.938.24 2.53-.878 1.27-4.665 1.26-6.4.47-1.97-.89-6.73-7.162-7.468-11.86 1.96-3.78 4.812-7.07 6.255-11.186-3.146-2.05-4.83-5.384-4.61-9.16l.08-.44c-3.097.59-1.49.37-4.82.628-10.608-.032-19.935-7.37-14.68-18.774.34-.673.664-1.287 1.243-.994.466.237.4 1.18.166 2.227-3.005 13.627 11.67 13.732 20.69 11.21.89-.25 2.67-1.936 3.905-2.495 2.016-.91 4.205-1.282 6.376-1.55 5.4-.63 11.893 2.276 15.19 2.37 3.3.096 7.99-.805 10.87-.615 2.09.098 4.143.483 6.16 1.03 1.306-6.49 1.4-11.27 4.492-12.38 1.814.293 3.213 2.818 4.25 4.167 2.112-.086 4.12.46 6.115 1.066 3.61-.522 6.642-2.593 9.833-4.203-3.234 2.69-3.673 7.075-3.303 11.127.138 2.103-.444 4.386-1.164 6.54-1.348 3.507-3.95 7.204-6.97 7.014-1.14-.036-1.805-.695-2.653-1.4-.164 1.427-.81 2.7-1.434 3.96-1.44 2.797-5.203 4.03-8.687 7.016-3.484 2.985 1.114 13.65 2.23 15.594 1.114 1.94 4.226 2.652 3.02 4.406-.37.58-.936.785-1.54 1.01l-.82.11zm-40.097-8.85l.553.14c.694-.27 2.09.15 2.83.353-1.363-1.31-3.417-3.24-4.897-4.46-.485-1.47-.278-2.96-.174-4.46l.02-.123c-.582 1.205-1.322 2.376-1.72 3.645-.465 1.71 2.07 3.557 3.052 4.615l.336.3z\" fill-rule=\"evenodd\"/></svg>"
        },
        "$:/core/images/advanced-search-button": {
            "title": "$:/core/images/advanced-search-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-advanced-search-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M74.5651535,87.9848361 C66.9581537,93.0488876 57.8237115,96 48,96 C21.490332,96 0,74.509668 0,48 C0,21.490332 21.490332,0 48,0 C74.509668,0 96,21.490332 96,48 C96,57.8541369 93.0305793,67.0147285 87.9377231,74.6357895 L122.284919,108.982985 C125.978897,112.676963 125.973757,118.65366 122.284271,122.343146 C118.593975,126.033442 112.613238,126.032921 108.92411,122.343793 L74.5651535,87.9848361 Z M48,80 C65.673112,80 80,65.673112 80,48 C80,30.326888 65.673112,16 48,16 C30.326888,16 16,30.326888 16,48 C16,65.673112 30.326888,80 48,80 Z\"></path>\n        <circle cx=\"48\" cy=\"48\" r=\"8\"></circle>\n        <circle cx=\"28\" cy=\"48\" r=\"8\"></circle>\n        <circle cx=\"68\" cy=\"48\" r=\"8\"></circle>\n    </g>\n</svg>"
        },
        "$:/core/images/auto-height": {
            "title": "$:/core/images/auto-height",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-auto-height tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M67.9867828,114.356363 L67.9579626,99.8785426 C67.9550688,98.4248183 67.1636987,97.087107 65.8909901,96.3845863 L49.9251455,87.5716209 L47.992126,95.0735397 L79.8995411,95.0735397 C84.1215894,95.0735397 85.4638131,89.3810359 81.686497,87.4948823 L49.7971476,71.5713518 L48.0101917,79.1500092 L79.992126,79.1500092 C84.2093753,79.1500092 85.5558421,73.4676733 81.7869993,71.5753162 L49.805065,55.517008 L48.0101916,63.0917009 L79.9921259,63.0917015 C84.2035118,63.0917016 85.5551434,57.4217887 81.7966702,55.5218807 L65.7625147,47.4166161 L67.9579705,50.9864368 L67.9579705,35.6148245 L77.1715737,44.8284272 C78.7336709,46.3905243 81.2663308,46.3905243 82.8284279,44.8284271 C84.390525,43.2663299 84.390525,40.7336699 82.8284278,39.1715728 L66.8284271,23.1715728 C65.2663299,21.6094757 62.73367,21.6094757 61.1715729,23.1715729 L45.1715729,39.1715729 C43.6094757,40.73367 43.6094757,43.26633 45.1715729,44.8284271 C46.73367,46.3905243 49.26633,46.3905243 50.8284271,44.8284271 L59.9579705,35.6988837 L59.9579705,50.9864368 C59.9579705,52.495201 60.806922,53.8755997 62.1534263,54.5562576 L78.1875818,62.6615223 L79.9921261,55.0917015 L48.0101917,55.0917009 C43.7929424,55.0917008 42.4464755,60.7740368 46.2153183,62.6663939 L78.1972526,78.7247021 L79.992126,71.1500092 L48.0101917,71.1500092 C43.7881433,71.1500092 42.4459197,76.842513 46.2232358,78.7286665 L78.1125852,94.6521971 L79.8995411,87.0735397 L47.992126,87.0735397 C43.8588276,87.0735397 42.4404876,92.5780219 46.0591064,94.5754586 L62.024951,103.388424 L59.9579785,99.8944677 L59.9867142,114.32986 L50.8284271,105.171573 C49.26633,103.609476 46.73367,103.609476 45.1715729,105.171573 C43.6094757,106.73367 43.6094757,109.26633 45.1715729,110.828427 L61.1715729,126.828427 C62.73367,128.390524 65.2663299,128.390524 66.8284271,126.828427 L82.8284278,110.828427 C84.390525,109.26633 84.390525,106.73367 82.8284279,105.171573 C81.2663308,103.609476 78.7336709,103.609476 77.1715737,105.171573 L67.9867828,114.356363 L67.9867828,114.356363 Z M16,20 L112,20 C114.209139,20 116,18.209139 116,16 C116,13.790861 114.209139,12 112,12 L16,12 C13.790861,12 12,13.790861 12,16 C12,18.209139 13.790861,20 16,20 L16,20 Z\"></path>\n</svg>"
        },
        "$:/core/images/blank": {
            "title": "$:/core/images/blank",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-blank tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\"></svg>"
        },
        "$:/core/images/bold": {
            "title": "$:/core/images/bold",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-bold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M41.1456583,51.8095238 L41.1456583,21.8711485 L67.4985994,21.8711485 C70.0084159,21.8711485 72.4285598,22.0802967 74.7591036,22.4985994 C77.0896475,22.9169022 79.1512515,23.6638602 80.9439776,24.7394958 C82.7367036,25.8151314 84.170863,27.3090474 85.2464986,29.2212885 C86.3221342,31.1335296 86.859944,33.5835518 86.859944,36.5714286 C86.859944,41.9496067 85.2465147,45.8337882 82.0196078,48.2240896 C78.792701,50.614391 74.6694929,51.8095238 69.6498599,51.8095238 L41.1456583,51.8095238 Z M13,0 L13,128 L75.0280112,128 C80.7647346,128 86.3519803,127.28292 91.789916,125.848739 C97.2278517,124.414559 102.068139,122.203563 106.310924,119.215686 C110.553709,116.22781 113.929959,112.373506 116.439776,107.652661 C118.949592,102.931816 120.204482,97.3445701 120.204482,90.8907563 C120.204482,82.8832466 118.262391,76.0411115 114.378151,70.3641457 C110.493911,64.6871798 104.607883,60.7133634 96.719888,58.442577 C102.456611,55.6937304 106.788968,52.1680887 109.717087,47.8655462 C112.645206,43.5630037 114.109244,38.1849062 114.109244,31.7310924 C114.109244,25.7553389 113.123259,20.7357813 111.151261,16.6722689 C109.179262,12.6087565 106.400578,9.35201972 102.815126,6.90196078 C99.2296739,4.45190185 94.927196,2.68908101 89.907563,1.61344538 C84.8879301,0.537809748 79.3305627,0 73.2352941,0 L13,0 Z M41.1456583,106.128852 L41.1456583,70.9915966 L71.8011204,70.9915966 C77.896389,70.9915966 82.7964334,72.3958776 86.5014006,75.2044818 C90.2063677,78.0130859 92.0588235,82.7039821 92.0588235,89.2773109 C92.0588235,92.6237329 91.4911355,95.3725383 90.3557423,97.5238095 C89.2203491,99.6750808 87.6965548,101.378145 85.7843137,102.633053 C83.8720726,103.887961 81.661077,104.784311 79.1512605,105.322129 C76.641444,105.859947 74.0121519,106.128852 71.2633053,106.128852 L41.1456583,106.128852 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/cancel-button": {
            "title": "$:/core/images/cancel-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-cancel-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n\t<g fill-rule=\"evenodd\">\n\t    <path d=\"M64,76.3137085 L47.0294734,93.2842351 C43.9038742,96.4098343 38.8399231,96.4084656 35.7157288,93.2842712 C32.5978915,90.166434 32.5915506,85.0947409 35.7157649,81.9705266 L52.6862915,65 L35.7157649,48.0294734 C32.5901657,44.9038742 32.5915344,39.8399231 35.7157288,36.7157288 C38.833566,33.5978915 43.9052591,33.5915506 47.0294734,36.7157649 L64,53.6862915 L80.9705266,36.7157649 C84.0961258,33.5901657 89.1600769,33.5915344 92.2842712,36.7157288 C95.4021085,39.833566 95.4084494,44.9052591 92.2842351,48.0294734 L75.3137085,65 L92.2842351,81.9705266 C95.4098343,85.0961258 95.4084656,90.1600769 92.2842712,93.2842712 C89.166434,96.4021085 84.0947409,96.4084494 80.9705266,93.2842351 L64,76.3137085 Z M64,129 C99.346224,129 128,100.346224 128,65 C128,29.653776 99.346224,1 64,1 C28.653776,1 1.13686838e-13,29.653776 1.13686838e-13,65 C1.13686838e-13,100.346224 28.653776,129 64,129 Z M64,113 C90.509668,113 112,91.509668 112,65 C112,38.490332 90.509668,17 64,17 C37.490332,17 16,38.490332 16,65 C16,91.509668 37.490332,113 64,113 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-down": {
            "title": "$:/core/images/chevron-down",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-down tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n\t<g fill-rule=\"evenodd\" transform=\"translate(64.000000, 40.500000) rotate(-270.000000) translate(-64.000000, -40.500000) translate(-22.500000, -26.500000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n\t</g>\n</svg>"
        },
        "$:/core/images/chevron-left": {
            "title": "$:/core/images/chevron-left",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-left tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\" version=\"1.1\">\n    <g fill-rule=\"evenodd\" transform=\"translate(92.500000, 64.000000) rotate(-180.000000) translate(-92.500000, -64.000000) translate(6.000000, -3.000000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-right": {
            "title": "$:/core/images/chevron-right",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-right tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\" transform=\"translate(-48.000000, -3.000000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-up": {
            "title": "$:/core/images/chevron-up",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-up tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n\t<g fill-rule=\"evenodd\" transform=\"translate(64.000000, 89.500000) rotate(-90.000000) translate(-64.000000, -89.500000) translate(-22.500000, 22.500000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n\t</g>\n</svg>"
        },
        "$:/core/images/clone-button": {
            "title": "$:/core/images/clone-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-clone-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M32.2650915,96 L32.2650915,120.002359 C32.2650915,124.419334 35.8432884,128 40.2627323,128 L120.002359,128 C124.419334,128 128,124.421803 128,120.002359 L128,40.2627323 C128,35.8457573 124.421803,32.2650915 120.002359,32.2650915 L96,32.2650915 L96,48 L108.858899,48 C110.519357,48 111.853018,49.3405131 111.853018,50.9941198 L111.853018,108.858899 C111.853018,110.519357 110.512505,111.853018 108.858899,111.853018 L50.9941198,111.853018 C49.333661,111.853018 48,110.512505 48,108.858899 L48,96 L32.2650915,96 Z\"></path>\n        <path d=\"M40,56 L32.0070969,56 C27.5881712,56 24,52.418278 24,48 C24,43.5907123 27.5848994,40 32.0070969,40 L40,40 L40,32.0070969 C40,27.5881712 43.581722,24 48,24 C52.4092877,24 56,27.5848994 56,32.0070969 L56,40 L63.9929031,40 C68.4118288,40 72,43.581722 72,48 C72,52.4092877 68.4151006,56 63.9929031,56 L56,56 L56,63.9929031 C56,68.4118288 52.418278,72 48,72 C43.5907123,72 40,68.4151006 40,63.9929031 L40,56 Z M7.9992458,0 C3.58138434,0 0,3.5881049 0,7.9992458 L0,88.0007542 C0,92.4186157 3.5881049,96 7.9992458,96 L88.0007542,96 C92.4186157,96 96,92.4118951 96,88.0007542 L96,7.9992458 C96,3.58138434 92.4118951,0 88.0007542,0 L7.9992458,0 Z M19.0010118,16 C17.3435988,16 16,17.336731 16,19.0010118 L16,76.9989882 C16,78.6564012 17.336731,80 19.0010118,80 L76.9989882,80 C78.6564012,80 80,78.663269 80,76.9989882 L80,19.0010118 C80,17.3435988 78.663269,16 76.9989882,16 L19.0010118,16 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/close-all-button": {
            "title": "$:/core/images/close-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-close-all-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\" transform=\"translate(-23.000000, -23.000000)\">\n        <path d=\"M43,131 L22.9976794,131 C18.5827987,131 15,127.418278 15,123 C15,118.590712 18.5806831,115 22.9976794,115 L43,115 L43,94.9976794 C43,90.5827987 46.581722,87 51,87 C55.4092877,87 59,90.5806831 59,94.9976794 L59,115 L79.0023206,115 C83.4172013,115 87,118.581722 87,123 C87,127.409288 83.4193169,131 79.0023206,131 L59,131 L59,151.002321 C59,155.417201 55.418278,159 51,159 C46.5907123,159 43,155.419317 43,151.002321 L43,131 Z\" transform=\"translate(51.000000, 123.000000) rotate(-45.000000) translate(-51.000000, -123.000000) \"></path>\n        <path d=\"M43,59 L22.9976794,59 C18.5827987,59 15,55.418278 15,51 C15,46.5907123 18.5806831,43 22.9976794,43 L43,43 L43,22.9976794 C43,18.5827987 46.581722,15 51,15 C55.4092877,15 59,18.5806831 59,22.9976794 L59,43 L79.0023206,43 C83.4172013,43 87,46.581722 87,51 C87,55.4092877 83.4193169,59 79.0023206,59 L59,59 L59,79.0023206 C59,83.4172013 55.418278,87 51,87 C46.5907123,87 43,83.4193169 43,79.0023206 L43,59 Z\" transform=\"translate(51.000000, 51.000000) rotate(-45.000000) translate(-51.000000, -51.000000) \"></path>\n        <path d=\"M115,59 L94.9976794,59 C90.5827987,59 87,55.418278 87,51 C87,46.5907123 90.5806831,43 94.9976794,43 L115,43 L115,22.9976794 C115,18.5827987 118.581722,15 123,15 C127.409288,15 131,18.5806831 131,22.9976794 L131,43 L151.002321,43 C155.417201,43 159,46.581722 159,51 C159,55.4092877 155.419317,59 151.002321,59 L131,59 L131,79.0023206 C131,83.4172013 127.418278,87 123,87 C118.590712,87 115,83.4193169 115,79.0023206 L115,59 Z\" transform=\"translate(123.000000, 51.000000) rotate(-45.000000) translate(-123.000000, -51.000000) \"></path>\n        <path d=\"M115,131 L94.9976794,131 C90.5827987,131 87,127.418278 87,123 C87,118.590712 90.5806831,115 94.9976794,115 L115,115 L115,94.9976794 C115,90.5827987 118.581722,87 123,87 C127.409288,87 131,90.5806831 131,94.9976794 L131,115 L151.002321,115 C155.417201,115 159,118.581722 159,123 C159,127.409288 155.419317,131 151.002321,131 L131,131 L131,151.002321 C131,155.417201 127.418278,159 123,159 C118.590712,159 115,155.419317 115,151.002321 L115,131 Z\" transform=\"translate(123.000000, 123.000000) rotate(-45.000000) translate(-123.000000, -123.000000) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/close-button": {
            "title": "$:/core/images/close-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-close-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M65.0864256,75.4091629 L14.9727349,125.522854 C11.8515951,128.643993 6.78104858,128.64922 3.65685425,125.525026 C0.539017023,122.407189 0.5336324,117.334539 3.65902635,114.209145 L53.7727171,64.0954544 L3.65902635,13.9817637 C0.537886594,10.8606239 0.532659916,5.79007744 3.65685425,2.6658831 C6.77469148,-0.451954124 11.8473409,-0.457338747 14.9727349,2.66805521 L65.0864256,52.7817459 L115.200116,2.66805521 C118.321256,-0.453084553 123.391803,-0.458311231 126.515997,2.6658831 C129.633834,5.78372033 129.639219,10.8563698 126.513825,13.9817637 L76.4001341,64.0954544 L126.513825,114.209145 C129.634965,117.330285 129.640191,122.400831 126.515997,125.525026 C123.39816,128.642863 118.32551,128.648248 115.200116,125.522854 L65.0864256,75.4091629 L65.0864256,75.4091629 Z\"></path>\n    </g>\n</svg>\n"
        },
        "$:/core/images/close-others-button": {
            "title": "$:/core/images/close-others-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-close-others-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z M64,96 C81.673112,96 96,81.673112 96,64 C96,46.326888 81.673112,32 64,32 C46.326888,32 32,46.326888 32,64 C32,81.673112 46.326888,96 64,96 Z M64,80 C72.836556,80 80,72.836556 80,64 C80,55.163444 72.836556,48 64,48 C55.163444,48 48,55.163444 48,64 C48,72.836556 55.163444,80 64,80 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/delete-button": {
            "title": "$:/core/images/delete-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-delete-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\" transform=\"translate(12.000000, 0.000000)\">\n        <rect x=\"0\" y=\"11\" width=\"105\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"28\" y=\"0\" width=\"48\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"8\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"8\" y=\"112\" width=\"88\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"80\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"56\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"32\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/done-button": {
            "title": "$:/core/images/done-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-done-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M3.52445141,76.8322939 C2.07397484,75.3828178 1.17514421,73.3795385 1.17514421,71.1666288 L1.17514421,23.1836596 C1.17514421,18.7531992 4.75686621,15.1751442 9.17514421,15.1751442 C13.5844319,15.1751442 17.1751442,18.7606787 17.1751442,23.1836596 L17.1751442,63.1751442 L119.173716,63.1751442 C123.590457,63.1751442 127.175144,66.7568662 127.175144,71.1751442 C127.175144,75.5844319 123.592783,79.1751442 119.173716,79.1751442 L9.17657227,79.1751442 C6.96796403,79.1751442 4.9674142,78.279521 3.51911285,76.8315312 Z\" id=\"Rectangle-285\" transform=\"translate(64.175144, 47.175144) rotate(-45.000000) translate(-64.175144, -47.175144) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/down-arrow": {
            "title": "$:/core/images/down-arrow",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-down-arrow tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <path d=\"M109.35638,81.3533152 C107.923899,82.7869182 105.94502,83.6751442 103.759224,83.6751442 L24.5910645,83.6751442 C20.225873,83.6751442 16.6751442,80.1307318 16.6751442,75.7584775 C16.6751442,71.3951199 20.2192225,67.8418109 24.5910645,67.8418109 L95.8418109,67.8418109 L95.8418109,-3.40893546 C95.8418109,-7.77412698 99.3862233,-11.3248558 103.758478,-11.3248558 C108.121835,-11.3248558 111.675144,-7.78077754 111.675144,-3.40893546 L111.675144,75.7592239 C111.675144,77.9416955 110.789142,79.9205745 109.356651,81.3538862 Z\" transform=\"translate(64.175144, 36.175144) rotate(45.000000) translate(-64.175144, -36.175144) \"></path>\n</svg>"
        },
        "$:/core/images/download-button": {
            "title": "$:/core/images/download-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-download-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\"><g fill-rule=\"evenodd\"><path class=\"tc-image-download-button-ring\" d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z\"/><path d=\"M34.3496823,66.4308767 L61.2415823,93.634668 C63.0411536,95.4551107 65.9588502,95.4551107 67.7584215,93.634668 L94.6503215,66.4308767 C96.4498928,64.610434 96.4498928,61.6588981 94.6503215,59.8384554 C93.7861334,58.9642445 92.6140473,58.4731195 91.3919019,58.4731195 L82.9324098,58.4731195 C80.3874318,58.4731195 78.3243078,56.3860674 78.3243078,53.8115729 L78.3243078,38.6615466 C78.3243078,36.0870521 76.2611837,34 73.7162058,34 L55.283798,34 C52.7388201,34 50.675696,36.0870521 50.675696,38.6615466 L50.675696,38.6615466 L50.675696,53.8115729 C50.675696,56.3860674 48.612572,58.4731195 46.0675941,58.4731195 L37.608102,58.4731195 C35.063124,58.4731195 33,60.5601716 33,63.134666 C33,64.3709859 33.4854943,65.5566658 34.3496823,66.4308767 L34.3496823,66.4308767 Z\"/></g></svg>"
        },
        "$:/core/images/edit-button": {
            "title": "$:/core/images/edit-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-edit-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M116.870058,45.3431458 L108.870058,45.3431458 L108.870058,45.3431458 L108.870058,61.3431458 L116.870058,61.3431458 L116.870058,45.3431458 Z M124.870058,45.3431458 L127.649881,45.3431458 C132.066101,45.3431458 135.656854,48.9248678 135.656854,53.3431458 C135.656854,57.7524334 132.07201,61.3431458 127.649881,61.3431458 L124.870058,61.3431458 L124.870058,45.3431458 Z M100.870058,45.3431458 L15.6638275,45.3431458 C15.5064377,45.3431458 15.3501085,45.3476943 15.1949638,45.3566664 L15.1949638,45.3566664 C15.0628002,45.3477039 14.928279,45.3431458 14.7913977,45.3431458 C6.68160973,45.3431458 -8.34314575,53.3431458 -8.34314575,53.3431458 C-8.34314575,53.3431458 6.85614548,61.3431458 14.7913977,61.3431458 C14.9266533,61.3431458 15.0596543,61.3384973 15.190398,61.3293588 C15.3470529,61.3385075 15.5049057,61.3431458 15.6638275,61.3431458 L100.870058,61.3431458 L100.870058,45.3431458 L100.870058,45.3431458 Z\" transform=\"translate(63.656854, 53.343146) rotate(-45.000000) translate(-63.656854, -53.343146) \"></path>\n        <path d=\"M35.1714596,124.189544 C41.9594858,123.613403 49.068777,121.917633 58.85987,118.842282 C60.6854386,118.268877 62.4306907,117.705515 65.1957709,116.802278 C81.1962861,111.575575 87.0734839,109.994907 93.9414474,109.655721 C102.29855,109.242993 107.795169,111.785371 111.520478,118.355045 C112.610163,120.276732 115.051363,120.951203 116.97305,119.861518 C118.894737,118.771832 119.569207,116.330633 118.479522,114.408946 C113.146151,105.003414 104.734907,101.112919 93.5468356,101.66546 C85.6716631,102.054388 79.4899908,103.716944 62.7116783,109.197722 C59.9734132,110.092199 58.2519873,110.64787 56.4625698,111.20992 C37.002649,117.322218 25.6914684,118.282267 16.8654804,112.957098 C14.9739614,111.815848 12.5154166,112.424061 11.3741667,114.31558 C10.2329168,116.207099 10.84113,118.665644 12.7326489,119.806894 C19.0655164,123.627836 26.4866335,124.926678 35.1714596,124.189544 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/erase": {
            "title": "$:/core/images/erase",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-erase tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60.0870401,127.996166 L123.102318,64.980888 C129.636723,58.4464827 129.629513,47.8655877 123.098967,41.3350425 L99.4657866,17.7018617 C92.927448,11.1635231 82.3486358,11.1698163 75.8199411,17.698511 L4.89768189,88.6207702 C-1.63672343,95.1551755 -1.6295126,105.736071 4.90103262,112.266616 L20.6305829,127.996166 L60.0870401,127.996166 Z M25.1375576,120.682546 L10.812569,106.357558 C7.5455063,103.090495 7.54523836,97.793808 10.8048093,94.5342371 L46.2691086,59.0699377 L81.7308914,94.5317205 L55.5800654,120.682546 L25.1375576,120.682546 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/excise": {
            "title": "$:/core/images/excise",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-excise tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M56,107.313709 L53.6568542,109.656854 C50.5326599,112.781049 45.4673401,112.781049 42.3431457,109.656854 C39.2189514,106.53266 39.2189514,101.46734 42.3431458,98.3431457 L58.3431458,82.3431457 C61.4673401,79.2189514 66.5326599,79.2189514 69.6568542,82.3431458 L85.6568542,98.3431458 C88.7810486,101.46734 88.7810486,106.53266 85.6568542,109.656854 C82.5326599,112.781049 77.4673401,112.781049 74.3431458,109.656854 L72,107.313708 L72,121.597798 C72,125.133636 68.418278,128 64,128 C59.581722,128 56,125.133636 56,121.597798 L56,107.313709 Z M0,40.0070969 C0,35.5848994 3.59071231,32 8,32 C12.418278,32 16,35.5881712 16,40.0070969 L16,71.9929031 C16,76.4151006 12.4092877,80 8,80 C3.581722,80 0,76.4118288 0,71.9929031 L0,40.0070969 Z M32,40.0070969 C32,35.5848994 35.5907123,32 40,32 C44.418278,32 48,35.5881712 48,40.0070969 L48,71.9929031 C48,76.4151006 44.4092877,80 40,80 C35.581722,80 32,76.4118288 32,71.9929031 L32,40.0070969 Z M80,40.0070969 C80,35.5848994 83.5907123,32 88,32 C92.418278,32 96,35.5881712 96,40.0070969 L96,71.9929031 C96,76.4151006 92.4092877,80 88,80 C83.581722,80 80,76.4118288 80,71.9929031 L80,40.0070969 Z M56,8.00709688 C56,3.58489938 59.5907123,0 64,0 C68.418278,0 72,3.58817117 72,8.00709688 L72,39.9929031 C72,44.4151006 68.4092877,48 64,48 C59.581722,48 56,44.4118288 56,39.9929031 L56,8.00709688 Z M112,40.0070969 C112,35.5848994 115.590712,32 120,32 C124.418278,32 128,35.5881712 128,40.0070969 L128,71.9929031 C128,76.4151006 124.409288,80 120,80 C115.581722,80 112,76.4118288 112,71.9929031 L112,40.0070969 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/export-button": {
            "title": "$:/core/images/export-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-export-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00348646,127.999999 C8.00464867,128 8.00581094,128 8.00697327,128 L119.993027,128 C122.205254,128 124.207939,127.101378 125.657096,125.651198 L125.656838,125.65759 C127.104563,124.210109 128,122.21009 128,119.999949 L128,56.0000511 C128,51.5817449 124.409288,48 120,48 C115.581722,48 112,51.5797863 112,56.0000511 L112,112 L16,112 L16,56.0000511 C16,51.5817449 12.4092877,48 8,48 C3.581722,48 7.10542736e-15,51.5797863 7.10542736e-15,56.0000511 L7.10542736e-15,119.999949 C7.10542736e-15,124.418255 3.59071231,128 8,128 C8.00116233,128 8.0023246,128 8.00348681,127.999999 Z M56.6235633,27.3113724 L47.6580188,36.2769169 C44.5333664,39.4015692 39.4634864,39.4061295 36.339292,36.2819351 C33.2214548,33.1640979 33.2173444,28.0901742 36.3443103,24.9632084 L58.9616908,2.34582788 C60.5248533,0.782665335 62.5748436,0.000361191261 64.624516,2.38225238e-14 L64.6193616,0.00151809229 C66.6695374,0.000796251595 68.7211167,0.781508799 70.2854358,2.34582788 L92.9028163,24.9632084 C96.0274686,28.0878607 96.0320289,33.1577408 92.9078345,36.2819351 C89.7899973,39.3997724 84.7160736,39.4038827 81.5891078,36.2769169 L72.6235633,27.3113724 L72.6235633,88.5669606 C72.6235633,92.9781015 69.0418413,96.5662064 64.6235633,96.5662064 C60.2142756,96.5662064 56.6235633,92.984822 56.6235633,88.5669606 L56.6235633,27.3113724 L56.6235633,27.3113724 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/file": {
            "title": "$:/core/images/file",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-file tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M111.96811,30.5 L112,30.5 L112,119.999079 C112,124.417866 108.419113,128 104.000754,128 L23.9992458,128 C19.5813843,128 16,124.417687 16,119.999079 L16,8.00092105 C16,3.58213437 19.5808867,0 23.9992458,0 L81,0 L81,0.0201838424 C83.1589869,-0.071534047 85.3482153,0.707077645 86.9982489,2.35711116 L109.625176,24.9840387 C111.151676,26.510538 111.932942,28.4998414 111.96811,30.5 L111.96811,30.5 Z M81,8 L24,8 L24,120 L104,120 L104,30.5 L89.0003461,30.5 C84.5818769,30.5 81,26.9216269 81,22.4996539 L81,8 Z\"></path>\n        <rect x=\"32\" y=\"36\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"52\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"68\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"84\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"100\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"20\" width=\"40\" height=\"8\" rx=\"4\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/fixed-height": {
            "title": "$:/core/images/fixed-height",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fixed-height tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60,35.6568542 L50.8284271,44.8284271 C49.26633,46.3905243 46.73367,46.3905243 45.1715729,44.8284271 C43.6094757,43.26633 43.6094757,40.73367 45.1715729,39.1715729 L61.1715729,23.1715729 C62.73367,21.6094757 65.2663299,21.6094757 66.8284271,23.1715728 L82.8284278,39.1715728 C84.390525,40.7336699 84.390525,43.2663299 82.8284279,44.8284271 C81.2663308,46.3905243 78.7336709,46.3905243 77.1715737,44.8284272 L68,35.6568539 L68,93.3431461 L77.1715737,84.1715728 C78.7336709,82.6094757 81.2663308,82.6094757 82.8284279,84.1715729 C84.390525,85.7336701 84.390525,88.2663301 82.8284278,89.8284272 L66.8284271,105.828427 C65.2663299,107.390524 62.73367,107.390524 61.1715729,105.828427 L45.1715729,89.8284271 C43.6094757,88.26633 43.6094757,85.73367 45.1715729,84.1715729 C46.73367,82.6094757 49.26633,82.6094757 50.8284271,84.1715729 L60,93.3431458 L60,35.6568542 L60,35.6568542 Z M16,116 L112,116 C114.209139,116 116,114.209139 116,112 C116,109.790861 114.209139,108 112,108 L16,108 C13.790861,108 12,109.790861 12,112 C12,114.209139 13.790861,116 16,116 L16,116 Z M16,20 L112,20 C114.209139,20 116,18.209139 116,16 C116,13.790861 114.209139,12 112,12 L16,12 C13.790861,12 12,13.790861 12,16 C12,18.209139 13.790861,20 16,20 L16,20 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-all-button": {
            "title": "$:/core/images/fold-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold-all tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"0\" y=\"64\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M64.0292774,58.6235628 C61.9791013,58.6242848 59.9275217,57.8435723 58.3632024,56.279253 L35.7458219,33.6618725 C32.6211696,30.5372202 32.6166093,25.4673401 35.7408036,22.3431458 C38.8586409,19.2253085 43.9325646,19.2211982 47.0595304,22.348164 L64.0250749,39.3137085 L80.9906194,22.348164 C84.1152717,19.2235117 89.1851518,19.2189514 92.3093461,22.3431458 C95.4271834,25.460983 95.4312937,30.5349067 92.3043279,33.6618725 L69.6869474,56.279253 C68.1237851,57.8424153 66.0737951,58.6247195 64.0241231,58.6250809 Z\" transform=\"translate(64.024316, 39.313708) scale(1, -1) translate(-64.024316, -39.313708) \"></path>\n        <path d=\"M64.0292774,123.621227 C61.9791013,123.621949 59.9275217,122.841236 58.3632024,121.276917 L35.7458219,98.6595365 C32.6211696,95.5348842 32.6166093,90.4650041 35.7408036,87.3408098 C38.8586409,84.2229725 43.9325646,84.2188622 47.0595304,87.345828 L64.0250749,104.311373 L80.9906194,87.345828 C84.1152717,84.2211757 89.1851518,84.2166154 92.3093461,87.3408098 C95.4271834,90.458647 95.4312937,95.5325707 92.3043279,98.6595365 L69.6869474,121.276917 C68.1237851,122.840079 66.0737951,123.622383 64.0241231,123.622745 Z\" transform=\"translate(64.024316, 104.311372) scale(1, -1) translate(-64.024316, -104.311372) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-button": {
            "title": "$:/core/images/fold-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M64.0292774,63.6235628 C61.9791013,63.6242848 59.9275217,62.8435723 58.3632024,61.279253 L35.7458219,38.6618725 C32.6211696,35.5372202 32.6166093,30.4673401 35.7408036,27.3431458 C38.8586409,24.2253085 43.9325646,24.2211982 47.0595304,27.348164 L64.0250749,44.3137085 L80.9906194,27.348164 C84.1152717,24.2235117 89.1851518,24.2189514 92.3093461,27.3431458 C95.4271834,30.460983 95.4312937,35.5349067 92.3043279,38.6618725 L69.6869474,61.279253 C68.1237851,62.8424153 66.0737951,63.6247195 64.0241231,63.6250809 Z\" transform=\"translate(64.024316, 44.313708) scale(1, -1) translate(-64.024316, -44.313708) \"></path>\n        <path d=\"M64.0049614,105.998482 C61.9547853,105.999204 59.9032057,105.218491 58.3388864,103.654172 L35.7215059,81.0367916 C32.5968535,77.9121393 32.5922933,72.8422592 35.7164876,69.7180649 C38.8343248,66.6002276 43.9082485,66.5961173 47.0352144,69.7230831 L64.0007589,86.6886276 L80.9663034,69.7230831 C84.0909557,66.5984308 89.1608358,66.5938705 92.2850301,69.7180649 C95.4028673,72.8359021 95.4069777,77.9098258 92.2800119,81.0367916 L69.6626314,103.654172 C68.099469,105.217334 66.0494791,105.999639 63.999807,106 Z\" transform=\"translate(64.000000, 86.688628) scale(1, -1) translate(-64.000000, -86.688628) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-others-button": {
            "title": "$:/core/images/fold-others-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold-others tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"56.0314331\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M101.657101,104.948818 C100.207918,103.498614 98.2051847,102.599976 95.9929031,102.599976 L72,102.599976 L72,78.6070725 C72,76.3964271 71.1036108,74.3936927 69.6545293,72.9441002 L69.6571005,72.9488183 C68.2079177,71.4986143 66.2051847,70.5999756 63.9929031,70.5999756 L32.0070969,70.5999756 C27.5881712,70.5999756 24,74.1816976 24,78.5999756 C24,83.0092633 27.5848994,86.5999756 32.0070969,86.5999756 L56,86.5999756 L56,110.592879 C56,112.803524 56.8963895,114.806259 58.3454713,116.255852 L58.3429,116.251133 C59.7920828,117.701337 61.7948156,118.599976 64.0070969,118.599976 L88,118.599976 L88,142.592879 C88,147.011804 91.581722,150.599976 96,150.599976 C100.409288,150.599976 104,147.015076 104,142.592879 L104,110.607072 C104,108.396427 103.103611,106.393693 101.654529,104.9441 Z\" transform=\"translate(64.000000, 110.599976) rotate(-45.000000) translate(-64.000000, -110.599976) \"></path>\n        <path d=\"M101.725643,11.7488671 C100.27646,10.2986632 98.2737272,9.40002441 96.0614456,9.40002441 L72.0685425,9.40002441 L72.0685425,-14.5928787 C72.0685425,-16.8035241 71.1721533,-18.8062584 69.7230718,-20.255851 L69.725643,-20.2511329 C68.2764602,-21.7013368 66.2737272,-22.5999756 64.0614456,-22.5999756 L32.0756394,-22.5999756 C27.6567137,-22.5999756 24.0685425,-19.0182536 24.0685425,-14.5999756 C24.0685425,-10.1906879 27.6534419,-6.59997559 32.0756394,-6.59997559 L56.0685425,-6.59997559 L56.0685425,17.3929275 C56.0685425,19.6035732 56.964932,21.6063078 58.4140138,23.0559004 L58.4114425,23.0511823 C59.8606253,24.5013859 61.8633581,25.4000244 64.0756394,25.4000244 L88.0685425,25.4000244 L88.0685425,49.3929275 C88.0685425,53.8118532 91.6502645,57.4000244 96.0685425,57.4000244 C100.47783,57.4000244 104.068542,53.815125 104.068542,49.3929275 L104.068542,17.4071213 C104.068542,15.1964759 103.172153,13.1937416 101.723072,11.744149 Z\" transform=\"translate(64.068542, 17.400024) scale(1, -1) rotate(-45.000000) translate(-64.068542, -17.400024) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/folder": {
            "title": "$:/core/images/folder",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-folder tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M55.6943257,128.000004 L7.99859666,128.000004 C3.5810937,128.000004 0,124.413822 0,119.996384 L0,48.0036243 C0,43.5833471 3.58387508,40.0000044 7.99859666,40.0000044 L16,40.0000044 L16,31.9999914 C16,27.5817181 19.5783731,24 24.0003461,24 L55.9996539,24 C60.4181231,24 64,27.5800761 64,31.9999914 L64,40.0000044 L104.001403,40.0000044 C108.418906,40.0000044 112,43.5861868 112,48.0036243 L112,59.8298353 L104,59.7475921 L104,51.9994189 C104,49.7887607 102.207895,48.0000044 99.9972215,48.0000044 L56,48.0000044 L56,36.0000255 C56,33.7898932 54.2072328,32 51.9957423,32 L28.0042577,32 C25.7890275,32 24,33.7908724 24,36.0000255 L24,48.0000044 L12.0027785,48.0000044 C9.78987688,48.0000044 8,49.7906032 8,51.9994189 L8,116.00059 C8,118.211248 9.79210499,120.000004 12.0027785,120.000004 L58.7630167,120.000004 L55.6943257,128.000004 L55.6943257,128.000004 Z\"></path>\n        <path d=\"M23.8728955,55.5 L119.875702,55.5 C124.293205,55.5 126.87957,59.5532655 125.650111,64.5630007 L112.305967,118.936999 C111.077582,123.942356 106.497904,128 102.083183,128 L6.08037597,128 C1.66287302,128 -0.923492342,123.946735 0.305967145,118.936999 L13.650111,64.5630007 C14.878496,59.5576436 19.4581739,55.5 23.8728955,55.5 L23.8728955,55.5 L23.8728955,55.5 Z M25.6530124,64 L113.647455,64 C115.858129,64 117.151473,66.0930612 116.538306,68.6662267 L105.417772,115.333773 C104.803671,117.910859 102.515967,120 100.303066,120 L12.3086228,120 C10.0979492,120 8.8046054,117.906939 9.41777189,115.333773 L20.5383062,68.6662267 C21.1524069,66.0891409 23.4401107,64 25.6530124,64 L25.6530124,64 L25.6530124,64 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/full-screen-button": {
            "title": "$:/core/images/full-screen-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-full-screen-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g>\n        <g>\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(104.000000, 104.000000) rotate(-180.000000) translate(-104.000000, -104.000000) translate(80.000000, 80.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(24.000000, 104.000000) rotate(-90.000000) translate(-24.000000, -104.000000) translate(0.000000, 80.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(104.000000, 24.000000) rotate(90.000000) translate(-104.000000, -24.000000) translate(80.000000, 0.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/github": {
            "title": "$:/core/images/github",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-github tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n        <g fill-rule=\"evenodd\">\n            <path d=\"M63.9383506,1.60695328 C28.6017227,1.60695328 -0.055756057,30.2970814 -0.055756057,65.6906208 C-0.055756057,94.003092 18.2804728,118.019715 43.7123154,126.493393 C46.9143781,127.083482 48.0812647,125.104717 48.0812647,123.405261 C48.0812647,121.886765 48.02626,117.85449 47.9948287,112.508284 C30.1929317,116.379268 26.4368926,103.916587 26.4368926,103.916587 C23.5255693,96.5129372 19.3294921,94.5420399 19.3294921,94.5420399 C13.5186324,90.5687739 19.7695302,90.6474524 19.7695302,90.6474524 C26.1933001,91.099854 29.5721638,97.2525155 29.5721638,97.2525155 C35.2808718,107.044059 44.5531024,104.215566 48.1991321,102.575118 C48.7806109,98.4366275 50.4346826,95.612068 52.2616263,94.0109598 C38.0507543,92.3941159 23.1091047,86.8944862 23.1091047,62.3389152 C23.1091047,55.3443933 25.6039634,49.6205298 29.6978889,45.1437211 C29.0378318,43.5229433 26.8415704,37.0044266 30.3265147,28.1845627 C30.3265147,28.1845627 35.6973364,26.4615028 47.9241083,34.7542205 C53.027764,33.330139 58.5046663,32.6220321 63.9462084,32.5944947 C69.3838216,32.6220321 74.856795,33.330139 79.9683085,34.7542205 C92.1872225,26.4615028 97.5501864,28.1845627 97.5501864,28.1845627 C101.042989,37.0044266 98.8467271,43.5229433 98.190599,45.1437211 C102.292382,49.6205298 104.767596,55.3443933 104.767596,62.3389152 C104.767596,86.9574291 89.8023734,92.3744463 75.5482834,93.9598188 C77.8427675,95.9385839 79.8897303,99.8489072 79.8897303,105.828476 C79.8897303,114.392635 79.8111521,121.304544 79.8111521,123.405261 C79.8111521,125.120453 80.966252,127.114954 84.2115327,126.489459 C109.623731,117.996111 127.944244,93.9952241 127.944244,65.6906208 C127.944244,30.2970814 99.2867652,1.60695328 63.9383506,1.60695328\"></path>\n        </g>\n    </svg>\n"
        },
        "$:/core/images/globe": {
            "title": "$:/core/images/globe",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-globe tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M72.8111354,37.1275855 C72.8111354,37.9789875 72.8111354,38.8303894 72.8111354,39.6817913 C72.8111354,41.8784743 73.7885604,46.5631866 72.8111354,48.5143758 C71.3445471,51.4420595 68.1617327,52.0543531 66.4170946,54.3812641 C65.2352215,55.9575873 61.7987417,64.9821523 62.7262858,67.3005778 C66.6959269,77.2228204 74.26087,70.4881886 80.6887657,76.594328 C81.5527211,77.415037 83.5758191,78.8666631 83.985137,79.8899578 C87.2742852,88.1128283 76.4086873,94.8989524 87.7419325,106.189751 C88.9872885,107.430443 91.555495,102.372895 91.8205061,101.575869 C92.6726866,99.0129203 98.5458765,96.1267309 100.908882,94.5234439 C102.928056,93.1534443 105.782168,91.8557166 107.236936,89.7775886 C109.507391,86.5342557 108.717505,82.2640435 110.334606,79.0328716 C112.473794,74.7585014 114.163418,69.3979002 116.332726,65.0674086 C120.230862,57.2857361 121.054075,67.1596684 121.400359,67.5059523 C121.757734,67.8633269 122.411167,67.5059523 122.916571,67.5059523 C123.011132,67.5059523 124.364019,67.6048489 124.432783,67.5059523 C125.0832,66.5705216 123.390209,49.5852316 123.114531,48.2089091 C121.710578,41.1996597 116.17083,32.4278331 111.249523,27.7092761 C104.975994,21.6942076 104.160516,11.5121686 92.9912146,12.7547535 C92.7872931,12.7774397 87.906794,22.9027026 85.2136766,26.2672064 C81.486311,30.9237934 82.7434931,22.1144904 78.6876623,22.1144904 C78.6065806,22.1144904 77.5045497,22.0107615 77.4353971,22.1144904 C76.8488637,22.9942905 75.9952305,26.0101404 75.1288269,26.5311533 C74.8635477,26.6906793 73.4071369,26.2924966 73.2826811,26.5311533 C71.0401728,30.8313939 81.5394677,28.7427264 79.075427,34.482926 C76.7225098,39.9642538 72.747373,32.4860199 72.747373,43.0434079\"></path>\n        <path d=\"M44.4668556,7.01044608 C54.151517,13.1403033 45.1489715,19.2084878 47.1611905,23.2253896 C48.8157833,26.5283781 51.4021933,28.6198851 48.8753629,33.038878 C46.8123257,36.6467763 42.0052989,37.0050492 39.251679,39.7621111 C36.2115749,42.8060154 33.7884281,48.7028116 32.4624592,52.6732691 C30.8452419,57.5158356 47.0088721,59.5388126 44.5246867,63.6811917 C43.1386839,65.9923513 37.7785192,65.1466282 36.0880227,63.8791519 C34.9234453,63.0059918 32.4946425,63.3331166 31.6713597,62.0997342 C29.0575851,58.1839669 29.4107339,54.0758543 28.0457962,49.9707786 C27.1076833,47.1493864 21.732611,47.8501656 20.2022714,49.3776393 C19.6790362,49.8998948 19.8723378,51.1703278 19.8723378,51.8829111 C19.8723378,57.1682405 26.9914913,55.1986414 26.9914913,58.3421973 C26.9914913,72.9792302 30.9191897,64.8771867 38.1313873,69.6793121 C48.1678018,76.3618966 45.9763926,76.981595 53.0777543,84.0829567 C56.7511941,87.7563965 60.8192437,87.7689005 62.503478,93.3767069 C64.1046972,98.7081071 53.1759798,98.7157031 50.786754,100.825053 C49.663965,101.816317 47.9736094,104.970571 46.5680513,105.439676 C44.7757187,106.037867 43.334221,105.93607 41.6242359,107.219093 C39.1967302,109.040481 37.7241465,112.151588 37.6034934,112.030935 C35.4555278,109.88297 34.0848666,96.5511248 33.7147244,93.7726273 C33.1258872,89.3524817 28.1241923,88.2337027 26.7275443,84.7420826 C25.1572737,80.8164061 28.2518481,75.223612 25.599097,70.9819941 C19.0797019,60.557804 13.7775712,56.4811506 10.2493953,44.6896152 C9.3074899,41.5416683 13.5912267,38.1609942 15.1264825,35.8570308 C17.0029359,33.0410312 17.7876232,30.0028946 19.8723378,27.2224065 C22.146793,24.1888519 40.8551166,9.46076832 
43.8574051,8.63490613 L44.4668556,7.01044608 Z\"></path>\n        <path d=\"M64,126 C98.2416545,126 126,98.2416545 126,64 C126,29.7583455 98.2416545,2 64,2 C29.7583455,2 2,29.7583455 2,64 C2,98.2416545 29.7583455,126 64,126 Z M64,120 C94.927946,120 120,94.927946 120,64 C120,33.072054 94.927946,8 64,8 C33.072054,8 8,33.072054 8,64 C8,94.927946 33.072054,120 64,120 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-1": {
            "title": "$:/core/images/heading-1",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-1 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M14,30 L27.25,30 L27.25,60.104 L61.7,60.104 L61.7,30 L74.95,30 L74.95,105.684 L61.7,105.684 L61.7,71.552 L27.25,71.552 L27.25,105.684 L14,105.684 L14,30 Z M84.3350766,43.78 C86.8790893,43.78 89.3523979,43.5680021 91.7550766,43.144 C94.1577553,42.7199979 96.3307336,42.0133383 98.2740766,41.024 C100.21742,40.0346617 101.87807,38.7626744 103.256077,37.208 C104.634084,35.6533256 105.535075,33.7453446 105.959077,31.484 L115.817077,31.484 L115.817077,105.684 L102.567077,105.684 L102.567077,53.32 L84.3350766,53.32 L84.3350766,43.78 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-2": {
            "title": "$:/core/images/heading-2",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-2 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M125.519077,105.684 L74.8510766,105.684 C74.9217436,99.5359693 76.4057288,94.1653563 79.3030766,89.572 C82.2004244,84.9786437 86.1577182,80.986017 91.1750766,77.594 C93.5777553,75.8273245 96.0863969,74.113675 98.7010766,72.453 C101.315756,70.792325 103.718399,69.0080095 105.909077,67.1 C108.099754,65.1919905 109.901736,63.1250111 111.315077,60.899 C112.728417,58.6729889 113.47041,56.1113478 113.541077,53.214 C113.541077,51.8713266 113.382078,50.4403409 113.064077,48.921 C112.746075,47.4016591 112.127748,45.9883399 111.209077,44.681 C110.290405,43.3736601 109.018418,42.2783377 107.393077,41.395 C105.767735,40.5116622 103.647756,40.07 101.033077,40.07 C98.6303979,40.07 96.6340846,40.5469952 95.0440766,41.501 C93.4540687,42.4550048 92.1820814,43.762325 91.2280766,45.423 C90.2740719,47.083675 89.5674123,49.0446554 89.1080766,51.306 C88.648741,53.5673446 88.3837436,56.0053203 88.3130766,58.62 L76.2290766,58.62 C76.2290766,54.5213128 76.7767378,50.7230175 77.8720766,47.225 C78.9674154,43.7269825 80.610399,40.7060127 82.8010766,38.162 C84.9917542,35.6179873 87.6593942,33.6216739 90.8040766,32.173 C93.948759,30.7243261 97.6057224,30 101.775077,30 C106.297766,30 110.078395,30.7419926 113.117077,32.226 C116.155758,33.7100074 118.611401,35.5826554 120.484077,37.844 C122.356753,40.1053446 123.681739,42.5609868 124.459077,45.211 C125.236414,47.8610133 125.625077,50.3873213 125.625077,52.79 C125.625077,55.7580148 125.165748,58.4433213 124.247077,60.846 C123.328405,63.2486787 122.091751,65.4569899 120.537077,67.471 C118.982402,69.4850101 117.215753,71.3399915 115.237077,73.036 C113.2584,74.7320085 111.209087,76.3219926 109.089077,77.806 C106.969066,79.2900074 104.849087,80.7033266 102.729077,82.046 C100.609066,83.3886734 98.6480856,84.7313266 96.8460766,86.074 C95.0440676,87.4166734 93.47175,88.8123261 92.1290766,90.261 C90.7864032,91.7096739 89.8677458,93.2466585 89.3730766,94.872 L125.519077,94.872 L125.519077,105.684 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-3": {
            "title": "$:/core/images/heading-3",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-3 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M94.8850766,62.224 C96.8637532,62.294667 98.8424001,62.1533351 100.821077,61.8 C102.799753,61.4466649 104.566402,60.8283378 106.121077,59.945 C107.675751,59.0616623 108.930072,57.8426744 109.884077,56.288 C110.838081,54.7333256 111.315077,52.8253446 111.315077,50.564 C111.315077,47.3839841 110.237421,44.8400095 108.082077,42.932 C105.926733,41.0239905 103.153094,40.07 99.7610766,40.07 C97.641066,40.07 95.8037511,40.4939958 94.2490766,41.342 C92.6944022,42.1900042 91.4047484,43.3383261 90.3800766,44.787 C89.3554048,46.2356739 88.5957458,47.860991 88.1010766,49.663 C87.6064075,51.465009 87.3944096,53.3199905 87.4650766,55.228 L75.3810766,55.228 C75.5224107,51.623982 76.1937373,48.2850154 77.3950766,45.211 C78.596416,42.1369846 80.2393995,39.4693446 82.3240766,37.208 C84.4087537,34.9466554 86.9350618,33.1800064 89.9030766,31.908 C92.8710915,30.6359936 96.2277246,30 99.9730766,30 C102.870424,30 105.714729,30.4239958 108.506077,31.272 C111.297424,32.1200042 113.806065,33.3566585 116.032077,34.982 C118.258088,36.6073415 120.042403,38.6743208 121.385077,41.183 C122.72775,43.6916792 123.399077,46.5713171 123.399077,49.822 C123.399077,53.5673521 122.551085,56.8356527 120.855077,59.627 C119.159068,62.4183473 116.509095,64.4499936 112.905077,65.722 L112.905077,65.934 C117.145098,66.7820042 120.448731,68.8843166 122.816077,72.241 C125.183422,75.5976835 126.367077,79.6786426 126.367077,84.484 C126.367077,88.017351 125.660417,91.1796527 124.247077,93.971 C122.833736,96.7623473 120.925755,99.129657 118.523077,101.073 C116.120398,103.016343 113.329093,104.517995 110.149077,105.578 C106.969061,106.638005 103.612428,107.168 100.079077,107.168 C95.7683884,107.168 92.005426,106.549673 88.7900766,105.313 C85.5747272,104.076327 82.8894207,102.327345 80.7340766,100.066 C78.5787325,97.8046554 76.9357489,95.0840159 75.8050766,91.904 C74.6744043,88.7239841 74.0737436,85.1906861 74.0030766,81.304 L86.0870766,81.304 C85.9457426,85.8266893 87.0587315,89.5896517 89.4260766,92.593 C91.7934218,95.5963483 95.3443863,97.098 100.079077,97.098 C104.107097,97.098 107.481396,95.9496782 110.202077,93.653 C112.922757,91.3563219 114.283077,88.0880212 114.283077,83.848 C114.283077,80.9506522 113.717749,78.6540085 112.587077,76.958 C111.456404,75.2619915 109.972419,73.9723378 108.135077,73.089 C106.297734,72.2056623 104.230755,71.6580011 101.934077,71.446 C99.6373985,71.2339989 97.2877553,71.163333 94.8850766,71.234 L94.8850766,62.224 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-4": {
            "title": "$:/core/images/heading-4",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-4 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8,30 L21.25,30 L21.25,60.104 L55.7,60.104 L55.7,30 L68.95,30 L68.95,105.684 L55.7,105.684 L55.7,71.552 L21.25,71.552 L21.25,105.684 L8,105.684 L8,30 Z M84.5890766,78.548 L107.061077,78.548 L107.061077,45.9 L106.849077,45.9 L84.5890766,78.548 Z M128.049077,88.088 L118.509077,88.088 L118.509077,105.684 L107.061077,105.684 L107.061077,88.088 L75.2610766,88.088 L75.2610766,76.11 L107.061077,31.484 L118.509077,31.484 L118.509077,78.548 L128.049077,78.548 L128.049077,88.088 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-5": {
            "title": "$:/core/images/heading-5",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-5 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M83.7550766,31.484 L122.127077,31.484 L122.127077,42.296 L92.7650766,42.296 L88.9490766,61.164 L89.1610766,61.376 C90.7864181,59.5386575 92.8533974,58.1430048 95.3620766,57.189 C97.8707558,56.2349952 100.361731,55.758 102.835077,55.758 C106.509762,55.758 109.795729,56.3763272 112.693077,57.613 C115.590424,58.8496729 118.0284,60.5809889 120.007077,62.807 C121.985753,65.0330111 123.487405,67.6653181 124.512077,70.704 C125.536748,73.7426819 126.049077,77.028649 126.049077,80.562 C126.049077,83.5300148 125.572081,86.5863176 124.618077,89.731 C123.664072,92.8756824 122.144754,95.7376538 120.060077,98.317 C117.9754,100.896346 115.30776,103.016325 112.057077,104.677 C108.806394,106.337675 104.919766,107.168 100.397077,107.168 C96.7930586,107.168 93.454092,106.691005 90.3800766,105.737 C87.3060613,104.782995 84.6030883,103.35201 82.2710766,101.444 C79.939065,99.5359905 78.0840835,97.1863473 76.7060766,94.395 C75.3280697,91.6036527 74.5684107,88.3353521 74.4270766,84.59 L86.5110766,84.59 C86.8644117,88.6180201 88.2423979,91.7096559 90.6450766,93.865 C93.0477553,96.0203441 96.2277235,97.098 100.185077,97.098 C102.729089,97.098 104.884401,96.6740042 106.651077,95.826 C108.417752,94.9779958 109.848738,93.8120074 110.944077,92.328 C112.039415,90.8439926 112.816741,89.1126766 113.276077,87.134 C113.735412,85.1553234 113.965077,83.0353446 113.965077,80.774 C113.965077,78.7246564 113.682413,76.763676 113.117077,74.891 C112.55174,73.018324 111.703749,71.3753404 110.573077,69.962 C109.442404,68.5486596 107.976086,67.4180042 106.174077,66.57 C104.372068,65.7219958 102.269755,65.298 99.8670766,65.298 C97.3230639,65.298 94.9380878,65.7749952 92.7120766,66.729 C90.4860655,67.6830048 88.8784149,69.4673203 87.8890766,72.082 L75.8050766,72.082 L83.7550766,31.484 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-6": {
            "title": "$:/core/images/heading-6",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-6 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M112.587077,50.246 C112.304409,47.2073181 111.226753,44.751676 109.354077,42.879 C107.481401,41.006324 104.955093,40.07 101.775077,40.07 C99.584399,40.07 97.6940846,40.4763293 96.1040766,41.289 C94.5140687,42.1016707 93.1714154,43.1793266 92.0760766,44.522 C90.9807378,45.8646734 90.0974133,47.401658 89.4260766,49.133 C88.7547399,50.864342 88.2070787,52.6839905 87.7830766,54.592 C87.3590745,56.5000095 87.0587442,58.390324 86.8820766,60.263 C86.7054091,62.135676 86.5464107,63.8846585 86.4050766,65.51 L86.6170766,65.722 C88.2424181,62.7539852 90.4860623,60.5456739 93.3480766,59.097 C96.2100909,57.6483261 99.3017267,56.924 102.623077,56.924 C106.297762,56.924 109.583729,57.5599936 112.481077,58.832 C115.378424,60.1040064 117.834067,61.8529889 119.848077,64.079 C121.862087,66.3050111 123.399071,68.9373181 124.459077,71.976 C125.519082,75.0146819 126.049077,78.300649 126.049077,81.834 C126.049077,85.438018 125.466082,88.7769846 124.300077,91.851 C123.134071,94.9250154 121.455754,97.6103219 119.265077,99.907 C117.074399,102.203678 114.459758,103.987994 111.421077,105.26 C108.382395,106.532006 105.025762,107.168 101.351077,107.168 C95.9097161,107.168 91.4400941,106.16101 87.9420766,104.147 C84.4440591,102.13299 81.6880867,99.3770175 79.6740766,95.879 C77.6600666,92.3809825 76.2644138,88.2823568 75.4870766,83.583 C74.7097394,78.8836432 74.3210766,73.8133605 74.3210766,68.372 C74.3210766,63.9199777 74.7980719,59.4326893 75.7520766,54.91 C76.7060814,50.3873107 78.278399,46.2710186 80.4690766,42.561 C82.6597542,38.8509815 85.5393921,35.8300117 89.1080766,33.498 C92.6767611,31.1659883 97.0757171,30 102.305077,30 C105.273091,30 108.064397,30.4946617 110.679077,31.484 C113.293756,32.4733383 115.608067,33.8513245 117.622077,35.618 C119.636087,37.3846755 121.27907,39.5046543 122.551077,41.978 C123.823083,44.4513457 124.529743,47.2073181 124.671077,50.246 L112.587077,50.246 Z M100.927077,97.098 C103.117754,97.098 105.025735,96.6563378 106.651077,95.773 C108.276418,94.8896623 109.636738,93.7413404 110.732077,92.328 C111.827415,90.9146596 112.640074,89.271676 113.170077,87.399 C113.700079,85.526324 113.965077,83.6006766 113.965077,81.622 C113.965077,79.6433234 113.700079,77.7353425 113.170077,75.898 C112.640074,74.0606575 111.827415,72.4530069 110.732077,71.075 C109.636738,69.6969931 108.276418,68.5840042 106.651077,67.736 C105.025735,66.8879958 103.117754,66.464 100.927077,66.464 C98.736399,66.464 96.8107516,66.8703293 95.1500766,67.683 C93.4894017,68.4956707 92.0937489,69.5909931 90.9630766,70.969 C89.8324043,72.3470069 88.9844128,73.9546575 88.4190766,75.792 C87.8537405,77.6293425 87.5710766,79.5726564 87.5710766,81.622 C87.5710766,83.6713436 87.8537405,85.6146575 88.4190766,87.452 C88.9844128,89.2893425 89.8324043,90.9323261 90.9630766,92.381 C92.0937489,93.8296739 93.4894017,94.9779958 95.1500766,95.826 C96.8107516,96.6740042 98.736399,97.098 100.927077,97.098 L100.927077,97.098 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/help": {
            "title": "$:/core/images/help",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-help tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M36.0548906,111.44117 C30.8157418,115.837088 20.8865444,118.803477 9.5,118.803477 C7.86465619,118.803477 6.25937294,118.742289 4.69372699,118.624467 C12.612543,115.984876 18.7559465,110.02454 21.0611049,102.609942 C8.74739781,92.845129 1.04940554,78.9359851 1.04940554,63.5 C1.04940554,33.9527659 29.2554663,10 64.0494055,10 C98.8433448,10 127.049406,33.9527659 127.049406,63.5 C127.049406,93.0472341 98.8433448,117 64.0494055,117 C53.9936953,117 44.48824,114.999337 36.0548906,111.44117 L36.0548906,111.44117 Z M71.4042554,77.5980086 C71.406883,77.2865764 71.4095079,76.9382011 71.4119569,76.5610548 C71.4199751,75.3262169 71.4242825,74.0811293 71.422912,72.9158546 C71.4215244,71.736154 71.4143321,70.709635 71.4001396,69.8743525 C71.4078362,68.5173028 71.9951951,67.7870427 75.1273009,65.6385471 C75.2388969,65.5619968 76.2124091,64.8981068 76.5126553,64.6910879 C79.6062455,62.5580654 81.5345849,60.9050204 83.2750652,58.5038955 C85.6146327,55.2762841 86.8327108,51.426982 86.8327108,46.8554323 C86.8327108,33.5625756 76.972994,24.9029551 65.3778484,24.9029551 C54.2752771,24.9029551 42.8794554,34.5115163 41.3121702,47.1975534 C40.9043016,50.4989536 43.2499725,53.50591 46.5513726,53.9137786 C49.8527728,54.3216471 52.8597292,51.9759763 53.2675978,48.6745761 C54.0739246,42.1479456 60.2395837,36.9492759 65.3778484,36.9492759 C70.6427674,36.9492759 74.78639,40.5885487 74.78639,46.8554323 C74.78639,50.4892974 73.6853224,52.008304 69.6746221,54.7736715 C69.4052605,54.9593956 68.448509,55.6118556 68.3131127,55.7047319 C65.6309785,57.5445655 64.0858213,58.803255 62.6123358,60.6352315 C60.5044618,63.2559399 59.3714208,66.3518252 59.3547527,69.9487679 C59.3684999,70.8407274 59.3752803,71.8084521 59.3765995,72.9300232 C59.3779294,74.0607297 59.3737237,75.2764258 59.36589,76.482835 C59.3634936,76.8518793 59.3609272,77.1924914 59.3583633,77.4963784 C59.3568319,77.6778944 59.3556368,77.8074256 59.3549845,77.8730928 C59.3219814,81.1994287 61.9917551,83.9227111 65.318091,83.9557142 C68.644427,83.9887173 71.3677093,81.3189435 71.4007124,77.9926076 C71.4014444,77.9187458 71.402672,77.7856841 71.4042554,77.5980086 Z M65.3778489,102.097045 C69.5359735,102.097045 72.9067994,98.7262189 72.9067994,94.5680944 C72.9067994,90.4099698 69.5359735,87.0391439 65.3778489,87.0391439 C61.2197243,87.0391439 57.8488984,90.4099698 57.8488984,94.5680944 C57.8488984,98.7262189 61.2197243,102.097045 65.3778489,102.097045 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/home-button": {
            "title": "$:/core/images/home-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-home-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M112.9847,119.501583 C112.99485,119.336814 113,119.170705 113,119.003406 L113,67.56802 C116.137461,70.5156358 121.076014,70.4518569 124.133985,67.3938855 C127.25818,64.2696912 127.260618,59.2068102 124.131541,56.0777326 L70.3963143,2.34250601 C68.8331348,0.779326498 66.7828947,-0.000743167069 64.7337457,1.61675364e-05 C62.691312,-0.00409949529 60.6426632,0.777559815 59.077717,2.34250601 L33,28.420223 L33,28.420223 L33,8.00697327 C33,3.58484404 29.4092877,0 25,0 C20.581722,0 17,3.59075293 17,8.00697327 L17,44.420223 L5.3424904,56.0777326 C2.21694607,59.2032769 2.22220878,64.2760483 5.34004601,67.3938855 C8.46424034,70.5180798 13.5271213,70.5205187 16.6561989,67.3914411 L17,67.04764 L17,119.993027 C17,119.994189 17.0000002,119.995351 17.0000007,119.996514 C17.0000002,119.997675 17,119.998838 17,120 C17,124.418278 20.5881049,128 24.9992458,128 L105.000754,128 C109.418616,128 113,124.409288 113,120 C113,119.832611 112.99485,119.666422 112.9847,119.501583 Z M97,112 L97,51.5736087 L97,51.5736087 L64.7370156,19.3106244 L33,51.04764 L33,112 L97,112 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/import-button": {
            "title": "$:/core/images/import-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-import-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M105.449437,94.2138951 C105.449437,94.2138951 110.049457,94.1897106 110.049457,99.4026111 C110.049457,104.615512 105.163246,104.615511 105.163246,104.615511 L45.0075072,105.157833 C45.0075072,105.157833 0.367531803,106.289842 0.367532368,66.6449212 C0.367532934,27.0000003 45.0428249,27.0000003 45.0428249,27.0000003 L105.532495,27.0000003 C105.532495,27.0000003 138.996741,25.6734987 138.996741,55.1771866 C138.996741,84.6808745 105.727102,82.8457535 105.727102,82.8457535 L56.1735087,82.8457535 C56.1735087,82.8457535 22.6899229,85.1500223 22.6899229,66.0913753 C22.6899229,47.0327282 56.1735087,49.3383013 56.1735087,49.3383013 L105.727102,49.3383013 C105.727102,49.3383013 111.245209,49.3383024 111.245209,54.8231115 C111.245209,60.3079206 105.727102,60.5074524 105.727102,60.5074524 L56.1735087,60.5074524 C56.1735087,60.5074524 37.48913,60.5074528 37.48913,66.6449195 C37.48913,72.7823862 56.1735087,71.6766023 56.1735087,71.6766023 L105.727102,71.6766029 C105.727102,71.6766029 127.835546,73.1411469 127.835546,55.1771866 C127.835546,35.5304025 105.727102,38.3035317 105.727102,38.3035317 L45.0428249,38.3035317 C45.0428249,38.3035317 11.5287276,38.3035313 11.5287276,66.6449208 C11.5287276,94.9863103 45.0428244,93.9579678 45.0428244,93.9579678 L105.449437,94.2138951 Z\" transform=\"translate(69.367532, 66.000000) rotate(-45.000000) translate(-69.367532, -66.000000) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/info-button": {
            "title": "$:/core/images/info-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-info-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <g transform=\"translate(0.049406, 0.000000)\">\n            <path d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z\"></path>\n            <circle cx=\"64\" cy=\"32\" r=\"8\"></circle>\n            <rect x=\"56\" y=\"48\" width=\"16\" height=\"56\" rx=\"8\"></rect>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/italic": {
            "title": "$:/core/images/italic",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-italic tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n         <polygon points=\"66.7114846 0 89.1204482 0 62.4089636 128 40 128\"></polygon>\n    </g>\n</svg>"
        },
        "$:/core/images/left-arrow": {
            "created": "20150315234410875",
            "modified": "20150315235324760",
            "tags": "$:/tags/Image",
            "title": "$:/core/images/left-arrow",
            "text": "<svg class=\"tc-image-left-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path transform=\"rotate(135, 63.8945, 64.1752)\" d=\"m109.07576,109.35336c-1.43248,1.43361 -3.41136,2.32182 -5.59717,2.32182l-79.16816,0c-4.36519,0 -7.91592,-3.5444 -7.91592,-7.91666c0,-4.36337 3.54408,-7.91667 7.91592,-7.91667l71.25075,0l0,-71.25075c0,-4.3652 3.54442,-7.91592 7.91667,-7.91592c4.36336,0 7.91667,3.54408 7.91667,7.91592l0,79.16815c0,2.1825 -0.88602,4.16136 -2.3185,5.59467l-0.00027,-0.00056z\"/>\n</svg>\n"
        },
        "$:/core/images/line-width": {
            "title": "$:/core/images/line-width",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-line-width tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M128,-97 L112.992786,-97 C112.452362,-97 112,-96.5522847 112,-96 C112,-95.4438648 112.444486,-95 112.992786,-95 L128,-95 L128,-97 Z M128,-78.6794919 L111.216185,-88.3696322 C110.748163,-88.6398444 110.132549,-88.4782926 109.856406,-88 C109.578339,-87.5183728 109.741342,-86.9117318 110.216185,-86.6375814 L128,-76.3700908 L128,-78.6794919 Z M78.6794919,-128 L88.3696322,-111.216185 C88.6437826,-110.741342 88.4816272,-110.134474 88,-109.856406 C87.5217074,-109.580264 86.9077936,-109.748163 86.6375814,-110.216185 L76.3700908,-128 L78.6794919,-128 Z M97,-128 L97,-112.992786 C97,-112.444486 96.5561352,-112 96,-112 C95.4477153,-112 95,-112.452362 95,-112.992786 L95,-128 L97,-128 Z M115.629909,-128 L105.362419,-110.216185 C105.088268,-109.741342 104.481627,-109.578339 104,-109.856406 C103.521707,-110.132549 103.360156,-110.748163 103.630368,-111.216185 L113.320508,-128 L115.629909,-128 Z M128,-113.320508 L111.216185,-103.630368 C110.741342,-103.356217 110.134474,-103.518373 109.856406,-104 C109.580264,-104.478293 109.748163,-105.092206 110.216185,-105.362419 L128,-115.629909 L128,-113.320508 Z M48,-96 C48,-96.5522847 48.4523621,-97 48.9927864,-97 L79.0072136,-97 C79.5555144,-97 80,-96.5561352 80,-96 C80,-95.4477153 79.5476379,-95 79.0072136,-95 L48.9927864,-95 C48.4444856,-95 48,-95.4438648 48,-96 Z M54.4307806,-120 C54.706923,-120.478293 55.3225377,-120.639844 55.7905589,-120.369632 L81.7838153,-105.362419 C82.2586577,-105.088268 82.4216611,-104.481627 82.1435935,-104 C81.8674512,-103.521707 81.2518365,-103.360156 80.7838153,-103.630368 L54.7905589,-118.637581 C54.3157165,-118.911732 54.152713,-119.518373 54.4307806,-120 Z M104,-82.1435935 C104.478293,-82.4197359 105.092206,-82.2518365 105.362419,-81.7838153 L120.369632,-55.7905589 C120.643783,-55.3157165 120.481627,-54.7088482 120,-54.4307806 C119.521707,-54.1546382 118.907794,-54.3225377 118.637581,-54.7905589 L103.630368,-80.7838153 C103.356217,-81.2586577 103.518373,-81.865526 104,-82.1435935 Z M96,-80 C96.5522847,-80 97,-79.5476379 97,-79.0072136 L97,-48.9927864 C97,-48.4444856 96.5561352,-48 96,-48 C95.4477153,-48 95,-48.4523621 95,-48.9927864 L95,-79.0072136 C95,-79.5555144 95.4438648,-80 96,-80 Z M88,-82.1435935 C88.4782926,-81.8674512 88.6398444,-81.2518365 88.3696322,-80.7838153 L73.3624186,-54.7905589 C73.0882682,-54.3157165 72.4816272,-54.152713 72,-54.4307806 C71.5217074,-54.706923 71.3601556,-55.3225377 71.6303678,-55.7905589 L86.6375814,-81.7838153 C86.9117318,-82.2586577 87.5183728,-82.4216611 88,-82.1435935 Z M82.1435935,-88 C82.4197359,-87.5217074 82.2518365,-86.9077936 81.7838153,-86.6375814 L55.7905589,-71.6303678 C55.3157165,-71.3562174 54.7088482,-71.5183728 54.4307806,-72 C54.1546382,-72.4782926 54.3225377,-73.0922064 54.7905589,-73.3624186 L80.7838153,-88.3696322 C81.2586577,-88.6437826 81.865526,-88.4816272 82.1435935,-88 Z M1.30626177e-08,-41.9868843 L15.0170091,-57.9923909 L20.7983821,-52.9749272 L44.7207091,-81.2095939 L73.4260467,-42.1002685 L85.984793,-56.6159488 L104.48741,-34.0310661 L127.969109,-47.4978019 L127.969109,7.99473128e-07 L1.30626177e-08,7.99473128e-07 L1.30626177e-08,-41.9868843 Z M96,-84 C102.627417,-84 108,-89.372583 108,-96 C108,-102.627417 102.627417,-108 96,-108 C89.372583,-108 84,-102.627417 84,-96 C84,-89.372583 89.372583,-84 96,-84 Z\"></path>\n        <path d=\"M16,18 L112,18 C113.104569,18 114,17.1045695 114,16 
C114,14.8954305 113.104569,14 112,14 L16,14 C14.8954305,14 14,14.8954305 14,16 C14,17.1045695 14.8954305,18 16,18 L16,18 Z M16,35 L112,35 C114.209139,35 116,33.209139 116,31 C116,28.790861 114.209139,27 112,27 L16,27 C13.790861,27 12,28.790861 12,31 C12,33.209139 13.790861,35 16,35 L16,35 Z M16,56 L112,56 C115.313708,56 118,53.3137085 118,50 C118,46.6862915 115.313708,44 112,44 L16,44 C12.6862915,44 10,46.6862915 10,50 C10,53.3137085 12.6862915,56 16,56 L16,56 Z M16,85 L112,85 C117.522847,85 122,80.5228475 122,75 C122,69.4771525 117.522847,65 112,65 L16,65 C10.4771525,65 6,69.4771525 6,75 C6,80.5228475 10.4771525,85 16,85 L16,85 Z M16,128 L112,128 C120.836556,128 128,120.836556 128,112 C128,103.163444 120.836556,96 112,96 L16,96 C7.163444,96 0,103.163444 0,112 C0,120.836556 7.163444,128 16,128 L16,128 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/link": {
            "title": "$:/core/images/link",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-link tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M128.719999,57.568543 C130.219553,53.8628171 131.045202,49.8121445 131.045202,45.5685425 C131.045202,27.8915447 116.718329,13.5685425 99.0452364,13.5685425 L67.0451674,13.5685425 C49.3655063,13.5685425 35.0452019,27.8954305 35.0452019,45.5685425 C35.0452019,63.2455403 49.3720745,77.5685425 67.0451674,77.5685425 L99.0452364,77.5685425 C100.406772,77.5685425 101.748384,77.4835732 103.065066,77.3186499 C96.4792444,73.7895096 91.1190212,68.272192 87.7873041,61.5685425 L67.0506214,61.5685425 C58.2110723,61.5685425 51.0452019,54.4070414 51.0452019,45.5685425 C51.0452019,36.7319865 58.2005234,29.5685425 67.0506214,29.5685425 L99.0397824,29.5685425 C107.879331,29.5685425 115.045202,36.7300436 115.045202,45.5685425 C115.045202,48.9465282 113.99957,52.0800164 112.21335,54.6623005 C114.314383,56.4735917 117.050039,57.5685425 120.041423,57.5685425 L128.720003,57.5685425 Z\" transform=\"translate(83.045202, 45.568542) rotate(-225.000000) translate(-83.045202, -45.568542)\"></path>\n        <path d=\"M-0.106255113,71.0452019 C-1.60580855,74.7509276 -2.43145751,78.8016001 -2.43145751,83.0452019 C-2.43145751,100.7222 11.8954151,115.045202 29.568508,115.045202 L61.568577,115.045202 C79.2482381,115.045202 93.5685425,100.718314 93.5685425,83.0452019 C93.5685425,65.3682041 79.2416699,51.0452019 61.568577,51.0452019 L29.568508,51.0452019 C28.206973,51.0452019 26.8653616,51.1301711 25.5486799,51.2950943 C32.1345,54.8242347 37.4947231,60.3415524 40.8264403,67.0452019 L61.563123,67.0452019 C70.4026721,67.0452019 77.5685425,74.206703 77.5685425,83.0452019 C77.5685425,91.8817579 70.413221,99.0452019 61.563123,99.0452019 L29.573962,99.0452019 C20.7344129,99.0452019 13.5685425,91.8837008 13.5685425,83.0452019 C13.5685425,79.6672162 14.6141741,76.533728 16.4003949,73.9514439 C14.2993609,72.1401527 11.5637054,71.0452019 8.5723215,71.0452019 L-0.106255113,71.0452019 Z\" transform=\"translate(45.568542, 83.045202) rotate(-225.000000) translate(-45.568542, -83.045202)\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/list-bullet": {
            "title": "$:/core/images/list-bullet",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-list-bullet tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M11.6363636,40.2727273 C18.0629498,40.2727273 23.2727273,35.0629498 23.2727273,28.6363636 C23.2727273,22.2097775 18.0629498,17 11.6363636,17 C5.20977746,17 0,22.2097775 0,28.6363636 C0,35.0629498 5.20977746,40.2727273 11.6363636,40.2727273 Z M11.6363636,75.1818182 C18.0629498,75.1818182 23.2727273,69.9720407 23.2727273,63.5454545 C23.2727273,57.1188684 18.0629498,51.9090909 11.6363636,51.9090909 C5.20977746,51.9090909 0,57.1188684 0,63.5454545 C0,69.9720407 5.20977746,75.1818182 11.6363636,75.1818182 Z M11.6363636,110.090909 C18.0629498,110.090909 23.2727273,104.881132 23.2727273,98.4545455 C23.2727273,92.0279593 18.0629498,86.8181818 11.6363636,86.8181818 C5.20977746,86.8181818 0,92.0279593 0,98.4545455 C0,104.881132 5.20977746,110.090909 11.6363636,110.090909 Z M34.9090909,22.8181818 L128,22.8181818 L128,34.4545455 L34.9090909,34.4545455 L34.9090909,22.8181818 Z M34.9090909,57.7272727 L128,57.7272727 L128,69.3636364 L34.9090909,69.3636364 L34.9090909,57.7272727 Z M34.9090909,92.6363636 L128,92.6363636 L128,104.272727 L34.9090909,104.272727 L34.9090909,92.6363636 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/list-number": {
            "title": "$:/core/images/list-number",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-list-number tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M33.8390805,22.3563218 L128,22.3563218 L128,34.1264368 L33.8390805,34.1264368 L33.8390805,22.3563218 Z M33.8390805,57.6666667 L128,57.6666667 L128,69.4367816 L33.8390805,69.4367816 L33.8390805,57.6666667 Z M33.8390805,92.9770115 L128,92.9770115 L128,104.747126 L33.8390805,104.747126 L33.8390805,92.9770115 Z M0.379509711,42.6307008 L0.379509711,40.4082314 L1.37821948,40.4082314 C2.20382368,40.4082314 2.82301754,40.268077 3.23581964,39.9877642 C3.64862174,39.7074513 3.85501969,39.0400498 3.85501969,37.9855395 L3.85501969,22.7686318 C3.85501969,21.3270228 3.66193774,20.4327047 3.27576803,20.0856507 C2.88959832,19.7385967 1.79768657,19.5650723 0,19.5650723 L0,17.4226919 C3.50215975,17.2758613 6.25191314,16.4683055 8.24934266,15 L10.3666074,15 L10.3666074,37.865406 C10.3666074,38.786434 10.5164123,39.4404875 10.8160268,39.8275862 C11.1156412,40.2146849 11.764796,40.4082314 12.7635108,40.4082314 L13.7622206,40.4082314 L13.7622206,42.6307008 L0.379509711,42.6307008 Z M0.0798967812,77.9873934 L0.0798967812,76.0852799 C7.27064304,69.5312983 10.8659622,63.5046623 10.8659622,58.005191 C10.8659622,56.4434479 10.5397203,55.195407 9.88722667,54.2610308 C9.23473303,53.3266546 8.36253522,52.8594735 7.27060709,52.8594735 C6.3784219,52.8594735 5.61608107,53.1764892 4.98356173,53.8105302 C4.35104238,54.4445712 4.03478745,55.1753759 4.03478745,56.0029663 C4.03478745,56.9773871 4.28113339,57.8316611 4.77383268,58.5658139 C4.88036225,58.7259926 4.93362624,58.8461249 4.93362624,58.9262143 C4.93362624,59.0730449 4.77383427,59.2065252 4.45424555,59.3266593 C4.2411864,59.4067486 3.70188852,59.6336652 2.83633573,60.0074156 C1.99741533,60.3811661 1.47809145,60.5680386 1.2783485,60.5680386 C1.03865696,60.5680386 0.765679018,60.1976307 0.459406492,59.4568039 C0.153133966,58.715977 0,57.9184322 0,57.0641453 C0,55.1153036 0.848894811,53.5202138 2.5467099,52.2788283 C4.24452499,51.0374428 6.34512352,50.4167594 8.84856852,50.4167594 C11.3120649,50.4167594 13.3793735,51.0874979 15.0505562,52.4289952 C16.7217389,53.7704924 17.5573177,55.5224215 17.5573177,57.684835 C17.5573177,58.9662652 17.2743527,60.2076321 16.7084144,61.4089729 C16.142476,62.6103138 14.7875733,64.4623531 12.6436656,66.9651465 C10.4997579,69.4679398 8.40914641,71.7804862 6.3717683,73.902855 L17.8169822,73.902855 L16.7982982,79.6292176 L14.6810335,79.6292176 C14.7609307,79.3489048 14.8008787,79.0952922 14.8008787,78.8683723 C14.8008787,78.4812736 14.7010087,78.237672 14.5012658,78.1375603 C14.3015228,78.0374485 13.9020429,77.9873934 13.3028141,77.9873934 L0.0798967812,77.9873934 Z M12.2042333,97.1935484 C13.9486551,97.2335931 15.4400468,97.8309175 16.6784531,98.9855395 C17.9168594,100.140162 18.5360532,101.75861 18.5360532,103.840934 C18.5360532,106.830938 17.4041935,109.233584 15.14044,111.048943 C12.8766866,112.864303 10.1402492,113.771969 6.93104577,113.771969 C4.92030005,113.771969 3.26245842,113.388213 1.95747114,112.62069 C0.652483855,111.853166 0,110.848727 0,109.607341 C0,108.833144 0.26964894,108.209124 0.808954909,107.735261 C1.34826088,107.261399 1.93749375,107.024472 2.57667119,107.024472 C3.21584864,107.024472 3.73850152,107.224692 4.14464552,107.625139 C4.55078953,108.025586 4.92696644,108.67964 5.27318756,109.587319 C5.73925445,110.855401 6.51158227,111.489433 7.59019421,111.489433 C8.85523291,111.489433 9.87723568,111.012241 10.6562332,110.057842 
C11.4352307,109.103444 11.8247236,107.371536 11.8247236,104.862069 C11.8247236,103.153495 11.7048796,101.838714 11.4651881,100.917686 C11.2254966,99.9966584 10.6728827,99.5361513 9.80732989,99.5361513 C9.22141723,99.5361513 8.62219737,99.843156 8.00965231,100.457175 C7.51695303,100.951059 7.07752513,101.197998 6.69135542,101.197998 C6.3584505,101.197998 6.08880156,101.051169 5.88240051,100.757508 C5.67599946,100.463847 5.57280049,100.183539 5.57280049,99.916574 C5.57280049,99.5962164 5.67599946,99.3225818 5.88240051,99.0956618 C6.08880156,98.8687419 6.57150646,98.5016711 7.33052967,97.9944383 C10.2068282,96.0722929 11.6449559,93.9766521 11.6449559,91.7074527 C11.6449559,90.5194601 11.3386879,89.615131 10.7261429,88.9944383 C10.1135978,88.3737455 9.37455999,88.0634038 8.5090072,88.0634038 C7.71003539,88.0634038 6.98431355,88.3270274 6.33181991,88.8542825 C5.67932627,89.3815377 5.35308434,90.0122321 5.35308434,90.7463849 C5.35308434,91.3871 5.60608828,91.9810874 6.11210376,92.5283648 C6.28521432,92.7285883 6.3717683,92.8954387 6.3717683,93.028921 C6.3717683,93.1490551 5.80250943,93.4560598 4.6639746,93.9499444 C3.52543978,94.4438289 2.80970494,94.6907675 2.51674861,94.6907675 C2.10394651,94.6907675 1.76771758,94.3570667 1.50805174,93.6896552 C1.24838591,93.0222436 1.11855494,92.4082342 1.11855494,91.8476085 C1.11855494,90.0989901 2.04734573,88.6240327 3.90495518,87.4226919 C5.76256463,86.2213511 7.86982116,85.6206897 10.226788,85.6206897 C12.2907985,85.6206897 14.0784711,86.0678487 15.5898594,86.9621802 C17.1012478,87.8565117 17.8569306,89.0778566 17.8569306,90.6262514 C17.8569306,91.987771 17.2876717,93.2491599 16.1491369,94.4104561 C15.0106021,95.5717522 13.6956474,96.4994404 12.2042333,97.1935484 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/locked-padlock": {
            "title": "$:/core/images/locked-padlock",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-locked-padlock tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M96.4723753,64 L105,64 L105,96.0097716 C105,113.673909 90.6736461,128 73.001193,128 L55.998807,128 C38.3179793,128 24,113.677487 24,96.0097716 L24,64 L32.0000269,64 C32.0028554,48.2766389 32.3030338,16.2688026 64.1594984,16.2688041 C95.9543927,16.2688056 96.4648869,48.325931 96.4723753,64 Z M80.5749059,64 L48.4413579,64 C48.4426205,47.71306 48.5829272,31.9999996 64.1595001,31.9999996 C79.8437473,31.9999996 81.1369461,48.1359182 80.5749059,64 Z M67.7315279,92.3641717 C70.8232551,91.0923621 73,88.0503841 73,84.5 C73,79.8055796 69.1944204,76 64.5,76 C59.8055796,76 56,79.8055796 56,84.5 C56,87.947435 58.0523387,90.9155206 61.0018621,92.2491029 L55.9067479,115.020857 L72.8008958,115.020857 L67.7315279,92.3641717 L67.7315279,92.3641717 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/mail": {
            "title": "$:/core/images/mail",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mail tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M122.826782,104.894066 C121.945525,105.22777 120.990324,105.41043 119.993027,105.41043 L8.00697327,105.41043 C7.19458381,105.41043 6.41045219,105.289614 5.67161357,105.064967 L5.67161357,105.064967 L39.8346483,70.9019325 L60.6765759,91.7438601 C61.6118278,92.679112 62.8865166,93.0560851 64.0946097,92.8783815 C65.2975108,93.0473238 66.5641085,92.6696979 67.4899463,91.7438601 L88.5941459,70.6396605 C88.6693095,70.7292352 88.7490098,70.8162939 88.8332479,70.9005321 L122.826782,104.894066 Z M127.903244,98.6568194 C127.966933,98.2506602 128,97.8343714 128,97.4103789 L128,33.410481 C128,32.7414504 127.917877,32.0916738 127.763157,31.4706493 L94.2292399,65.0045665 C94.3188145,65.0797417 94.4058701,65.1594458 94.4901021,65.2436778 L127.903244,98.6568194 Z M0.205060636,99.2178117 C0.0709009529,98.6370366 0,98.0320192 0,97.4103789 L0,33.410481 C0,32.694007 0.0944223363,31.9995312 0.27147538,31.3387595 L0.27147538,31.3387595 L34.1777941,65.2450783 L0.205060636,99.2178117 L0.205060636,99.2178117 Z M5.92934613,25.6829218 C6.59211333,25.5051988 7.28862283,25.4104299 8.00697327,25.4104299 L119.993027,25.4104299 C120.759109,25.4104299 121.500064,25.5178649 122.201605,25.7184927 L122.201605,25.7184927 L64.0832611,83.8368368 L5.92934613,25.6829218 L5.92934613,25.6829218 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/menu-button": {
            "title": "$:/core/images/menu-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-menu-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <rect x=\"0\" y=\"16\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n    <rect x=\"0\" y=\"56\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n    <rect x=\"0\" y=\"96\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n</svg>"
        },
        "$:/core/images/mono-block": {
            "title": "$:/core/images/mono-block",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mono-block tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M23.9653488,32.9670593 L24.3217888,32.9670593 C25.0766067,32.9670593 25.6497006,33.1592554 26.0410876,33.5436534 C26.4324747,33.9280514 26.6281653,34.4906619 26.6281653,35.2315017 C26.6281653,36.0562101 26.4219913,36.6502709 26.009637,37.0137017 C25.5972828,37.3771326 24.9158602,37.5588453 23.9653488,37.5588453 L17.6542639,37.5588453 C16.6897744,37.5588453 16.0048573,37.380627 15.5994921,37.0241852 C15.1941269,36.6677435 14.9914474,36.0701882 14.9914474,35.2315017 C14.9914474,34.4207713 15.1941269,33.8406885 15.5994921,33.4912358 C16.0048573,33.141783 16.6897744,32.9670593 17.6542639,32.9670593 L18.388111,32.9670593 L17.5284616,30.5139133 L8.47069195,30.5139133 L7.5691084,32.9670593 L8.30295547,32.9670593 C9.25346691,32.9670593 9.93488953,33.1452775 10.3472438,33.5017193 C10.759598,33.8581611 10.965772,34.4347494 10.965772,35.2315017 C10.965772,36.0562101 10.759598,36.6502709 10.3472438,37.0137017 C9.93488953,37.3771326 9.25346691,37.5588453 8.30295547,37.5588453 L2.89345418,37.5588453 C1.92896463,37.5588453 1.24404754,37.3771326 0.838682371,37.0137017 C0.433317198,36.6502709 0.230637652,36.0562101 0.230637652,35.2315017 C0.230637652,34.4906619 0.426328248,33.9280514 0.817715312,33.5436534 C1.20910238,33.1592554 1.78219626,32.9670593 2.53701417,32.9670593 L2.89345418,32.9670593 L8.51262607,17.3256331 L6.83526132,17.3256331 C5.88474988,17.3256331 5.20332727,17.1439204 4.79097304,16.7804895 C4.37861882,16.4170587 4.1724448,15.8299869 4.1724448,15.0192565 C4.1724448,14.1945481 4.37861882,13.6004873 4.79097304,13.2370565 C5.20332727,12.8736257 5.88474988,12.691913 6.83526132,12.691913 L14.6979086,12.691913 C15.9419603,12.691913 16.815579,13.3628521 17.318791,14.7047506 L17.318791,14.7676518 L23.9653488,32.9670593 Z M12.9786097,17.3256331 L9.9383861,26.1737321 L16.0188333,26.1737321 L12.9786097,17.3256331 Z M35.3809383,26.6979086 L35.3809383,33.0928616 L38.5259972,33.0928616 C40.7485166,33.0928616 42.3140414,32.8482484 43.2226185,32.3590146 C44.1311956,31.8697807 44.5854773,31.0520736 44.5854773,29.9058686 C44.5854773,28.7456855 44.1521624,27.9209895 43.2855197,27.4317556 C42.4188769,26.9425218 40.9022748,26.6979086 38.7356678,26.6979086 L35.3809383,26.6979086 Z M46.0741385,24.370565 C47.5977525,24.9296893 48.7159844,25.6949794 49.428868,26.666458 C50.1417516,27.6379366 50.498188,28.8784752 50.498188,30.388111 C50.498188,31.6601189 50.1906743,32.8202846 49.5756374,33.8686428 C48.9606006,34.917001 48.0799929,35.7766419 46.933788,36.4475911 C46.2628387,36.8389782 45.5115266,37.1220307 44.6798291,37.296757 C43.8481316,37.4714834 42.6704935,37.5588453 41.1468796,37.5588453 L39.3856466,37.5588453 L30.2020747,37.5588453 C29.2795194,37.5588453 28.6190637,37.3771326 28.2206876,37.0137017 C27.8223114,36.6502709 27.6231264,36.0562101 27.6231264,35.2315017 C27.6231264,34.4906619 27.811828,33.9280514 28.189237,33.5436534 C28.5666459,33.1592554 29.118773,32.9670593 29.8456347,32.9670593 L30.2020747,32.9670593 L30.2020747,17.3256331 L29.8456347,17.3256331 C29.118773,17.3256331 28.5666459,17.1299425 28.189237,16.7385554 C27.811828,16.3471683 27.6231264,15.7740744 27.6231264,15.0192565 C27.6231264,14.2085262 27.8258059,13.6179599 28.2311711,13.24754 C28.6365363,12.8771201 29.2934976,12.691913 30.2020747,12.691913 L39.8469219,12.691913 C42.796303,12.691913 45.0362615,13.2650068 46.5668644,14.4112118 C48.0974674,15.5574168 
48.8627574,17.2347648 48.8627574,19.443306 C48.8627574,20.5335986 48.6286276,21.4945792 48.1603609,22.3262767 C47.6920943,23.1579742 46.9966938,23.8393968 46.0741385,24.370565 L46.0741385,24.370565 Z M35.3809383,17.1998307 L35.3809383,22.4835296 L38.2114913,22.4835296 C39.9307988,22.4835296 41.1433816,22.2808501 41.8492761,21.8754849 C42.5551706,21.4701197 42.9081126,20.7852027 42.9081126,19.8207131 C42.9081126,18.912136 42.5901154,18.2481858 41.9541114,17.8288425 C41.3181074,17.4094992 40.2872373,17.1998307 38.8614701,17.1998307 L35.3809383,17.1998307 Z M71.244119,13.3838259 C71.5236812,12.880614 71.8102281,12.5241775 72.1037684,12.3145059 C72.3973087,12.1048342 72.7677231,12 73.2150226,12 C73.8999499,12 74.3856819,12.1817127 74.6722332,12.5451435 C74.9587844,12.9085744 75.1020579,13.5305909 75.1020579,14.4112118 L75.143992,19.8626472 C75.143992,20.8271368 74.9867406,21.4771091 74.6722332,21.8125837 C74.3577257,22.1480584 73.7881263,22.3157932 72.9634178,22.3157932 C72.3763372,22.3157932 71.92555,22.1760142 71.6110425,21.896452 C71.2965351,21.6168898 71.0274605,21.0997075 70.8038107,20.3448896 C70.4403799,19.0169692 69.8602971,18.0629775 69.0635448,17.482886 C68.2667926,16.9027945 67.1625385,16.612753 65.7507494,16.612753 C63.5981206,16.612753 61.9487284,17.3396038 60.8025235,18.7933272 C59.6563185,20.2470506 59.0832246,22.3507245 59.0832246,25.104412 C59.0832246,27.8441215 59.6633074,29.9477954 60.8234905,31.4154969 C61.9836736,32.8831984 63.6400547,33.6170381 65.7926836,33.6170381 C67.2603851,33.6170381 68.878327,33.1278116 70.6465578,32.149344 C72.4147886,31.1708763 73.5295261,30.6816498 73.9908037,30.6816498 C74.53595,30.6816498 74.9937262,30.9122852 75.3641461,31.3735628 C75.734566,31.8348404 75.9197732,32.4079343 75.9197732,33.0928616 C75.9197732,34.3229353 74.836486,35.4831009 72.669879,36.5733935 C70.5032721,37.663686 68.0641285,38.2088241 65.3523753,38.2088241 C61.6901107,38.2088241 58.7267959,36.9997358 56.4623422,34.5815228 C54.1978885,32.1633099 53.0656786,29.0043046 53.0656786,25.104412 C53.0656786,21.3443006 54.2118664,18.22024 56.5042763,15.7321366 C58.7966863,13.2440331 61.7040894,12 65.226573,12 C66.2190187,12 67.1974717,12.1118232 68.1619613,12.3354729 C69.1264508,12.5591227 70.1538264,12.9085702 71.244119,13.3838259 L71.244119,13.3838259 Z M81.4645862,32.9670593 L81.4645862,17.3256331 L81.1081461,17.3256331 C80.3533282,17.3256331 79.7802344,17.1299425 79.3888473,16.7385554 C78.9974602,16.3471683 78.8017696,15.7740744 78.8017696,15.0192565 C78.8017696,14.2085262 79.0114381,13.6179599 79.4307814,13.24754 C79.8501247,12.8771201 80.5280528,12.691913 81.4645862,12.691913 L85.4063933,12.691913 L86.6434498,12.691913 C89.5648747,12.691913 91.7034933,12.8177141 93.0593699,13.06932 C94.4152465,13.320926 95.5684233,13.740263 96.5189347,14.3273436 C98.210286,15.3337675 99.5067362,16.7699967 100.408324,18.6360743 C101.309912,20.5021519 101.7607,22.6582429 101.7607,25.104412 C101.7607,27.6903623 101.247012,29.9512876 100.219621,31.8872557 C99.1922296,33.8232239 97.7350336,35.2874089 95.8479888,36.2798546 C94.9953241,36.7271541 93.9959043,37.0521403 92.8496993,37.2548229 C91.7034944,37.4575055 89.9981906,37.5588453 87.7337369,37.5588453 L85.4063933,37.5588453 L81.4645862,37.5588453 C80.5000966,37.5588453 79.8151795,37.380627 79.4098143,37.0241852 C79.0044492,36.6677435 78.8017696,36.0701882 78.8017696,35.2315017 C78.8017696,34.4906619 78.9974602,33.9280514 79.3888473,33.5436534 C79.7802344,33.1592554 80.3533282,32.9670593 81.1081461,32.9670593 L81.4645862,32.9670593 Z 
M86.8740874,17.2417648 L86.8740874,32.9670593 L88.0692098,32.9670593 C90.7110725,32.9670593 92.6609895,32.3205814 93.9190194,31.0276063 C95.1770492,29.7346312 95.8060547,27.7462749 95.8060547,25.0624779 C95.8060547,22.4206153 95.1665658,20.4497314 93.8875688,19.1497672 C92.6085718,17.849803 90.6831161,17.1998307 88.1111439,17.1998307 C87.7756693,17.1998307 87.5205727,17.2033252 87.3458463,17.2103142 C87.1711199,17.2173033 87.0138685,17.2277867 86.8740874,17.2417648 L86.8740874,17.2417648 Z M121.94052,17.1159625 L112.190837,17.1159625 L112.190837,22.4835296 L115.88104,22.4835296 L115.88104,22.2319249 C115.88104,21.4351727 116.055763,20.841112 116.405216,20.4497249 C116.754669,20.0583378 117.285829,19.8626472 117.998713,19.8626472 C118.627728,19.8626472 119.141415,20.0408655 119.539792,20.3973072 C119.938168,20.753749 120.137353,21.2045363 120.137353,21.7496826 C120.137353,21.7776388 120.144342,21.8684951 120.15832,22.0222543 C120.172298,22.1760135 120.179287,22.3297704 120.179287,22.4835296 L120.179287,26.8237109 C120.179287,27.7602442 120.011552,28.4311834 119.676077,28.8365486 C119.340603,29.2419138 118.795465,29.4445933 118.040647,29.4445933 C117.327763,29.4445933 116.789614,29.2558917 116.426183,28.8784827 C116.062752,28.5010738 115.88104,27.9419578 115.88104,27.201118 L115.88104,26.8237109 L112.190837,26.8237109 L112.190837,33.0928616 L121.94052,33.0928616 L121.94052,30.5977816 C121.94052,29.6612482 122.118738,28.9903091 122.47518,28.5849439 C122.831622,28.1795787 123.415199,27.9768992 124.225929,27.9768992 C125.022682,27.9768992 125.592281,28.1760842 125.934745,28.5744604 C126.277208,28.9728365 126.448438,29.6472701 126.448438,30.5977816 L126.448438,35.6718099 C126.448438,36.4266278 126.30167,36.9298322 126.008129,37.1814382 C125.714589,37.4330442 125.134506,37.5588453 124.267863,37.5588453 L107.095842,37.5588453 C106.173287,37.5588453 105.512831,37.3771326 105.114455,37.0137017 C104.716079,36.6502709 104.516894,36.0562101 104.516894,35.2315017 C104.516894,34.4906619 104.705595,33.9280514 105.083004,33.5436534 C105.460413,33.1592554 106.01254,32.9670593 106.739402,32.9670593 L107.095842,32.9670593 L107.095842,17.3256331 L106.739402,17.3256331 C106.026518,17.3256331 105.477886,17.126448 105.093488,16.7280719 C104.70909,16.3296957 104.516894,15.7600963 104.516894,15.0192565 C104.516894,14.2085262 104.719573,13.6179599 105.124938,13.24754 C105.530304,12.8771201 106.187265,12.691913 107.095842,12.691913 L124.267863,12.691913 C125.120528,12.691913 125.697116,12.8212085 125.997646,13.0798036 C126.298175,13.3383986 126.448438,13.8520864 126.448438,14.6208824 L126.448438,19.3175037 C126.448438,20.2680151 126.273714,20.9494377 125.924261,21.361792 C125.574808,21.7741462 125.008703,21.9803202 124.225929,21.9803202 C123.415199,21.9803202 122.831622,21.7706517 122.47518,21.3513084 C122.118738,20.9319652 121.94052,20.254037 121.94052,19.3175037 L121.94052,17.1159625 Z M19.7719369,47.6405477 C20.037521,47.1373358 20.3205734,46.7808993 20.6211028,46.5712277 C20.9216322,46.361556 21.295541,46.2567218 21.7428405,46.2567218 C22.4277678,46.2567218 22.9134998,46.4384345 23.2000511,46.8018653 C23.4866023,47.1652962 23.6298758,47.7873127 23.6298758,48.6679336 L23.6718099,54.119369 C23.6718099,55.0838586 23.5145586,55.7338309 23.2000511,56.0693055 C22.8855436,56.4047802 22.3089553,56.572515 21.4702687,56.572515 C20.8831881,56.572515 20.4254119,56.4292415 20.0969263,56.1426902 C19.7684407,55.856139 19.4993662,55.3424512 19.2896945,54.6016114 C18.9122856,53.2597129 18.3322027,52.3022267 17.5494286,51.7291243 
C16.7666545,51.1560218 15.6693894,50.8694748 14.2576003,50.8694748 C12.1049715,50.8694748 10.4590738,51.5963256 9.31985785,53.050049 C8.18064193,54.5037724 7.61104252,56.6074463 7.61104252,59.3611338 C7.61104252,62.1148214 8.20859773,64.2429566 9.40372609,65.7456034 C10.5988544,67.2482501 12.2936748,67.9995623 14.488238,67.9995623 C14.9914499,67.9995623 15.5645438,67.9401562 16.2075368,67.8213423 C16.8505299,67.7025283 17.6053364,67.5173212 18.4719792,67.2657152 L18.4719792,63.9529198 L16.1027015,63.9529198 C15.1521901,63.9529198 14.4777564,63.7781961 14.0793803,63.4287433 C13.6810042,63.0792906 13.4818191,62.4992078 13.4818191,61.6884774 C13.4818191,60.8497908 13.6810042,60.2522356 14.0793803,59.8957938 C14.4777564,59.5393521 15.1521901,59.3611338 16.1027015,59.3611338 L23.6718099,59.3611338 C24.6502776,59.3611338 25.3386891,59.5358576 25.7370653,59.8853103 C26.1354414,60.2347631 26.3346265,60.8218348 26.3346265,61.6465433 C26.3346265,62.3873831 26.1354414,62.9569825 25.7370653,63.3553586 C25.3386891,63.7537347 24.7621008,63.9529198 24.0072829,63.9529198 L23.6718099,63.9529198 L23.6718099,68.9430799 L23.6718099,69.1946846 C23.6718099,69.6419841 23.6228873,69.9529924 23.5250405,70.1277188 C23.4271937,70.3024451 23.2315031,70.4806634 22.9379628,70.6623788 C22.1412106,71.1376345 20.8762107,71.5569715 19.1429251,71.9204023 C17.4096396,72.2838332 15.6554131,72.4655459 13.8801932,72.4655459 C10.2179286,72.4655459 7.25461383,71.2564576 4.99016011,68.8382446 C2.72570638,66.4200317 1.59349651,63.2610264 1.59349651,59.3611338 C1.59349651,55.6010224 2.73968428,52.4769618 5.03209423,49.9888583 C7.32450417,47.5007549 10.2319073,46.2567218 13.7543909,46.2567218 C14.7328585,46.2567218 15.7078171,46.368545 16.6792957,46.5921947 C17.6507743,46.8158445 18.6816444,47.165292 19.7719369,47.6405477 L19.7719369,47.6405477 Z M35.611576,51.5823548 L35.611576,56.4047785 L42.4678043,56.4047785 L42.4678043,51.5823548 L42.1323314,51.5823548 C41.3775135,51.5823548 40.8009251,51.3866642 40.402549,50.9952772 C40.0041729,50.6038901 39.8049878,50.0307962 39.8049878,49.2759783 C39.8049878,48.4512699 40.0111618,47.8572091 40.4235161,47.4937783 C40.8358703,47.1303474 41.5172929,46.9486347 42.4678043,46.9486347 L47.8773056,46.9486347 C48.8278171,46.9486347 49.5022507,47.1303474 49.9006269,47.4937783 C50.299003,47.8572091 50.498188,48.4512699 50.498188,49.2759783 C50.498188,50.0307962 50.3059919,50.6038901 49.9215939,50.9952772 C49.5371959,51.3866642 48.9745854,51.5823548 48.2337456,51.5823548 L47.8773056,51.5823548 L47.8773056,67.2237811 L48.2337456,67.2237811 C48.9885636,67.2237811 49.5616574,67.4159772 49.9530445,67.8003752 C50.3444316,68.1847732 50.5401222,68.7473837 50.5401222,69.4882235 C50.5401222,70.3129319 50.3374426,70.9069927 49.9320774,71.2704235 C49.5267123,71.6338543 48.8417952,71.815567 47.8773056,71.815567 L42.4678043,71.815567 C41.5033148,71.815567 40.8183977,71.6373488 40.4130325,71.280907 C40.0076674,70.9244652 39.8049878,70.32691 39.8049878,69.4882235 C39.8049878,68.7473837 40.0041729,68.1847732 40.402549,67.8003752 C40.8009251,67.4159772 41.3775135,67.2237811 42.1323314,67.2237811 L42.4678043,67.2237811 L42.4678043,61.0384986 L35.611576,61.0384986 L35.611576,67.2237811 L35.9470489,67.2237811 C36.7018668,67.2237811 37.2784552,67.4159772 37.6768313,67.8003752 C38.0752074,68.1847732 38.2743925,68.7473837 38.2743925,69.4882235 C38.2743925,70.3129319 38.0682185,70.9069927 37.6558642,71.2704235 C37.24351,71.6338543 36.5620874,71.815567 35.611576,71.815567 L30.2020747,71.815567 C29.2375851,71.815567 
28.552668,71.6373488 28.1473029,71.280907 C27.7419377,70.9244652 27.5392581,70.32691 27.5392581,69.4882235 C27.5392581,68.7473837 27.7349487,68.1847732 28.1263358,67.8003752 C28.5177229,67.4159772 29.0908168,67.2237811 29.8456347,67.2237811 L30.2020747,67.2237811 L30.2020747,51.5823548 L29.8456347,51.5823548 C29.1047949,51.5823548 28.5421844,51.3866642 28.1577864,50.9952772 C27.7733884,50.6038901 27.5811923,50.0307962 27.5811923,49.2759783 C27.5811923,48.4512699 27.7803773,47.8572091 28.1787534,47.4937783 C28.5771296,47.1303474 29.2515632,46.9486347 30.2020747,46.9486347 L35.611576,46.9486347 C36.5481093,46.9486347 37.2260374,47.1303474 37.6453807,47.4937783 C38.064724,47.8572091 38.2743925,48.4512699 38.2743925,49.2759783 C38.2743925,50.0307962 38.0752074,50.6038901 37.6768313,50.9952772 C37.2784552,51.3866642 36.7018668,51.5823548 35.9470489,51.5823548 L35.611576,51.5823548 Z M67.365213,51.5823548 L67.365213,67.2237811 L70.887679,67.2237811 C71.8381904,67.2237811 72.519613,67.4019993 72.9319673,67.7584411 C73.3443215,68.1148829 73.5504955,68.6914712 73.5504955,69.4882235 C73.5504955,70.2989538 73.340827,70.8895201 72.9214837,71.25994 C72.5021404,71.6303599 71.8242123,71.815567 70.887679,71.815567 L58.4332458,71.815567 C57.4827343,71.815567 56.8013117,71.6338543 56.3889575,71.2704235 C55.9766033,70.9069927 55.7704292,70.3129319 55.7704292,69.4882235 C55.7704292,68.6774931 55.9731088,68.0974103 56.378474,67.7479575 C56.7838391,67.3985048 57.4687562,67.2237811 58.4332458,67.2237811 L61.9557117,67.2237811 L61.9557117,51.5823548 L58.4332458,51.5823548 C57.4827343,51.5823548 56.8013117,51.4006421 56.3889575,51.0372113 C55.9766033,50.6737805 55.7704292,50.0867087 55.7704292,49.2759783 C55.7704292,48.4512699 55.9731088,47.8641981 56.378474,47.5147453 C56.7838391,47.1652926 57.4687562,46.9905689 58.4332458,46.9905689 L70.887679,46.9905689 C71.8801247,46.9905689 72.5720308,47.1652926 72.9634178,47.5147453 C73.3548049,47.8641981 73.5504955,48.4512699 73.5504955,49.2759783 C73.5504955,50.0867087 73.347816,50.6737805 72.9424508,51.0372113 C72.5370856,51.4006421 71.8521685,51.5823548 70.887679,51.5823548 L67.365213,51.5823548 Z M97.8608265,51.5823548 L97.8608265,63.1771386 L97.8608265,63.5755127 C97.8608265,65.4485794 97.7385199,66.8044357 97.493903,67.6431222 C97.2492861,68.4818088 96.8404325,69.2296264 96.26733,69.8865976 C95.5264902,70.7392623 94.4991146,71.3822457 93.1851723,71.815567 C91.87123,72.2488884 90.2917273,72.4655459 88.4466169,72.4655459 C87.1466527,72.4655459 85.8921362,72.3397448 84.6830298,72.0881388 C83.4739233,71.8365328 82.3102631,71.4591296 81.1920144,70.9559176 C80.5769776,70.6763554 80.175113,70.31293 79.9864085,69.8656305 C79.797704,69.418331 79.7033532,68.6914802 79.7033532,67.6850564 L79.7033532,63.3658422 C79.7033532,62.1637247 79.8780769,61.3250508 80.2275297,60.849795 C80.5769824,60.3745393 81.185021,60.136915 82.0516638,60.136915 C83.2957156,60.136915 83.9806326,61.0524675 84.1064356,62.8835998 C84.1204137,63.2050963 84.1413806,63.4497096 84.1693368,63.6174469 C84.3370741,65.2389076 84.7144774,66.3466561 85.301558,66.9407258 C85.8886386,67.5347954 86.8251579,67.8318258 88.1111439,67.8318258 C89.7046484,67.8318258 90.8263749,67.4089943 91.476357,66.5633187 C92.126339,65.7176431 92.4513252,64.1765796 92.4513252,61.9400821 L92.4513252,51.5823548 L88.9288593,51.5823548 C87.9783478,51.5823548 87.2969252,51.4006421 86.884571,51.0372113 C86.4722168,50.6737805 86.2660427,50.0867087 86.2660427,49.2759783 C86.2660427,48.4512699 86.4652278,47.8641981 86.8636039,47.5147453 
C87.26198,47.1652926 87.9503916,46.9905689 88.9288593,46.9905689 L99.6220595,46.9905689 C100.600527,46.9905689 101.288939,47.1652926 101.687315,47.5147453 C102.085691,47.8641981 102.284876,48.4512699 102.284876,49.2759783 C102.284876,50.0867087 102.078702,50.6737805 101.666348,51.0372113 C101.253994,51.4006421 100.572571,51.5823548 99.6220595,51.5823548 L97.8608265,51.5823548 Z M112.505343,51.5823548 L112.505343,57.9353738 L118.984165,51.4565525 C118.257303,51.3726838 117.747109,51.1665098 117.453569,50.8380242 C117.160029,50.5095387 117.013261,49.9888619 117.013261,49.2759783 C117.013261,48.4512699 117.212446,47.8572091 117.610822,47.4937783 C118.009198,47.1303474 118.683632,46.9486347 119.634143,46.9486347 L124.771073,46.9486347 C125.721584,46.9486347 126.396018,47.1303474 126.794394,47.4937783 C127.19277,47.8572091 127.391955,48.4512699 127.391955,49.2759783 C127.391955,50.0447743 127.19277,50.6213627 126.794394,51.0057607 C126.396018,51.3901587 125.812441,51.5823548 125.043645,51.5823548 L124.561402,51.5823548 L118.459988,57.641835 C119.592215,58.4805215 120.626579,59.5812811 121.563113,60.9441468 C122.499646,62.3070125 123.596911,64.400203 124.854941,67.2237811 L125.127513,67.2237811 L125.546854,67.2237811 C126.371563,67.2237811 126.98659,67.4124827 127.391955,67.7898917 C127.79732,68.1673006 128,68.7334056 128,69.4882235 C128,70.3129319 127.793826,70.9069927 127.381472,71.2704235 C126.969118,71.6338543 126.287695,71.815567 125.337183,71.815567 L122.758235,71.815567 C121.626008,71.815567 120.710456,71.0537715 120.01155,69.5301576 C119.885747,69.2505954 119.787902,69.026949 119.718012,68.8592117 C118.795456,66.9022764 117.949793,65.3926632 117.180997,64.3303269 C116.412201,63.2679906 115.510627,62.2965265 114.476247,61.4159056 L112.505343,63.302941 L112.505343,67.2237811 L112.840816,67.2237811 C113.595634,67.2237811 114.172222,67.4159772 114.570599,67.8003752 C114.968975,68.1847732 115.16816,68.7473837 115.16816,69.4882235 C115.16816,70.3129319 114.961986,70.9069927 114.549631,71.2704235 C114.137277,71.6338543 113.455855,71.815567 112.505343,71.815567 L107.095842,71.815567 C106.131352,71.815567 105.446435,71.6373488 105.04107,71.280907 C104.635705,70.9244652 104.433025,70.32691 104.433025,69.4882235 C104.433025,68.7473837 104.628716,68.1847732 105.020103,67.8003752 C105.41149,67.4159772 105.984584,67.2237811 106.739402,67.2237811 L107.095842,67.2237811 L107.095842,51.5823548 L106.739402,51.5823548 C105.998562,51.5823548 105.435952,51.3866642 105.051554,50.9952772 C104.667156,50.6038901 104.474959,50.0307962 104.474959,49.2759783 C104.474959,48.4512699 104.674145,47.8572091 105.072521,47.4937783 C105.470897,47.1303474 106.14533,46.9486347 107.095842,46.9486347 L112.505343,46.9486347 C113.441877,46.9486347 114.119805,47.1303474 114.539148,47.4937783 C114.958491,47.8572091 115.16816,48.4512699 115.16816,49.2759783 C115.16816,50.0307962 114.968975,50.6038901 114.570599,50.9952772 C114.172222,51.3866642 113.595634,51.5823548 112.840816,51.5823548 L112.505343,51.5823548 Z M13.439885,96.325622 L17.4445933,84.4372993 C17.6961993,83.6545252 18.0456468,83.0849258 18.4929463,82.728484 C18.9402458,82.3720422 19.5343065,82.193824 20.2751463,82.193824 L23.5460076,82.193824 C24.496519,82.193824 25.1779416,82.3755367 25.5902958,82.7389675 C26.0026501,83.1023984 26.2088241,83.6964591 26.2088241,84.5211676 C26.2088241,85.2759855 26.009639,85.8490794 25.6112629,86.2404664 C25.2128868,86.6318535 24.6362984,86.8275441 23.8814805,86.8275441 L23.5460076,86.8275441 L24.1330852,102.46897 L24.4895252,102.46897 
C25.2443431,102.46897 25.8104481,102.661166 26.187857,103.045564 C26.565266,103.429962 26.7539676,103.992573 26.7539676,104.733413 C26.7539676,105.558121 26.5547826,106.152182 26.1564064,106.515613 C25.7580303,106.879044 25.0835967,107.060756 24.1330852,107.060756 L19.4154969,107.060756 C18.4649855,107.060756 17.7905518,106.882538 17.3921757,106.526096 C16.9937996,106.169654 16.7946145,105.572099 16.7946145,104.733413 C16.7946145,103.992573 16.9868106,103.429962 17.3712086,103.045564 C17.7556066,102.661166 18.325206,102.46897 19.0800239,102.46897 L19.4154969,102.46897 L19.1219581,89.6790642 L16.0607674,99.1981091 C15.8371177,99.9109927 15.5191204,100.42468 15.1067662,100.739188 C14.694412,101.053695 14.1248126,101.210947 13.3979509,101.210947 C12.6710892,101.210947 12.0945008,101.053695 11.6681685,100.739188 C11.2418362,100.42468 10.91685,99.9109927 10.6932002,99.1981091 L7.65297664,89.6790642 L7.35943781,102.46897 L7.69491075,102.46897 C8.44972866,102.46897 9.01932808,102.661166 9.40372609,103.045564 C9.78812409,103.429962 9.98032022,103.992573 9.98032022,104.733413 C9.98032022,105.558121 9.77764067,106.152182 9.3722755,106.515613 C8.96691032,106.879044 8.29597114,107.060756 7.35943781,107.060756 L2.62088241,107.060756 C1.68434908,107.060756 1.01340989,106.879044 0.608044719,106.515613 C0.202679546,106.152182 0,105.558121 0,104.733413 C0,103.992573 0.192196121,103.429962 0.57659413,103.045564 C0.960992139,102.661166 1.53059155,102.46897 2.28540946,102.46897 L2.62088241,102.46897 L3.22892713,86.8275441 L2.89345418,86.8275441 C2.13863627,86.8275441 1.56204791,86.6318535 1.16367179,86.2404664 C0.765295672,85.8490794 0.5661106,85.2759855 0.5661106,84.5211676 C0.5661106,83.6964591 0.772284622,83.1023984 1.18463885,82.7389675 C1.59699308,82.3755367 2.27841569,82.193824 3.22892713,82.193824 L6.49978838,82.193824 C7.22665007,82.193824 7.81022738,82.3685477 8.25053783,82.7180005 C8.69084827,83.0674532 9.05077919,83.6405471 9.33034138,84.4372993 L13.439885,96.325622 Z M43.8935644,98.3803938 L43.8935644,86.8275441 L42.7403761,86.8275441 C41.8178209,86.8275441 41.1573651,86.6458314 40.758989,86.2824006 C40.3606129,85.9189697 40.1614278,85.3318979 40.1614278,84.5211676 C40.1614278,83.7104372 40.3606129,83.119871 40.758989,82.7494511 C41.1573651,82.3790312 41.8178209,82.193824 42.7403761,82.193824 L48.6950209,82.193824 C49.6035981,82.193824 50.2605593,82.3790312 50.6659245,82.7494511 C51.0712897,83.119871 51.2739692,83.7104372 51.2739692,84.5211676 C51.2739692,85.2620074 51.0817731,85.8316068 50.6973751,86.2299829 C50.3129771,86.628359 49.7643445,86.8275441 49.051461,86.8275441 L48.6950209,86.8275441 L48.6950209,105.865634 C48.6950209,106.522605 48.6251315,106.934953 48.4853504,107.10269 C48.3455693,107.270428 48.0310665,107.354295 47.5418327,107.354295 L45.4451268,107.354295 C44.7741775,107.354295 44.3024234,107.284406 44.0298503,107.144625 C43.7572771,107.004843 43.5231473,106.76023 43.3274538,106.410777 L34.6051571,91.0838571 L34.6051571,102.46897 L35.8212466,102.46897 C36.7298237,102.46897 37.379796,102.643694 37.7711831,102.993147 C38.1625701,103.3426 38.3582607,103.922682 38.3582607,104.733413 C38.3582607,105.558121 38.1590757,106.152182 37.7606995,106.515613 C37.3623234,106.879044 36.7158456,107.060756 35.8212466,107.060756 L29.8037005,107.060756 C28.8951234,107.060756 28.2381621,106.879044 27.832797,106.515613 C27.4274318,106.152182 27.2247522,105.558121 27.2247522,104.733413 C27.2247522,103.992573 27.4134539,103.429962 27.7908629,103.045564 C28.1682718,102.661166 28.7273878,102.46897 
29.4682276,102.46897 L29.8037005,102.46897 L29.8037005,86.8275441 L29.4682276,86.8275441 C28.755344,86.8275441 28.203217,86.628359 27.8118299,86.2299829 C27.4204428,85.8316068 27.2247522,85.2620074 27.2247522,84.5211676 C27.2247522,83.7104372 27.4309263,83.119871 27.8432805,82.7494511 C28.2556347,82.3790312 28.9091015,82.193824 29.8037005,82.193824 L33.2422983,82.193824 C34.0670067,82.193824 34.6261227,82.3021527 34.919663,82.5188134 C35.2132033,82.7354741 35.5416839,83.1722835 35.9051148,83.8292546 L43.8935644,98.3803938 Z M64.6604624,86.3662688 C62.8572863,86.3662688 61.4420239,87.0931196 60.4146329,88.546843 C59.3872418,90.0005663 58.873554,92.0203728 58.873554,94.6063231 C58.873554,97.1922733 59.3907363,99.2190688 60.4251164,100.68677 C61.4594965,102.154472 62.8712644,102.888312 64.6604624,102.888312 C66.4636385,102.888312 67.8823953,102.157966 68.9167754,100.697254 C69.9511555,99.2365414 70.4683378,97.2062514 70.4683378,94.6063231 C70.4683378,92.0203728 69.95465,90.0005663 68.9272589,88.546843 C67.8998679,87.0931196 66.4776166,86.3662688 64.6604624,86.3662688 L64.6604624,86.3662688 Z M64.6604624,81.501911 C68.0990773,81.501911 70.929602,82.7319662 73.1521214,85.1921135 C75.3746408,87.6522607 76.4858838,90.7902992 76.4858838,94.6063231 C76.4858838,98.4503032 75.3816297,101.595331 73.1730884,104.0415 C70.9645471,106.487669 68.1270335,107.710735 64.6604624,107.710735 C61.2358256,107.710735 58.4053009,106.477185 56.1688034,104.010049 C53.9323059,101.542913 52.8140739,98.4083688 52.8140739,94.6063231 C52.8140739,90.7763211 53.9218224,87.6347881 56.1373528,85.1816299 C58.3528831,82.7284717 61.1938912,81.501911 64.6604624,81.501911 L64.6604624,81.501911 Z M87.4611651,98.1707232 L87.4611651,102.46897 L89.6207722,102.46897 C90.5293493,102.46897 91.1758272,102.643694 91.5602252,102.993147 C91.9446232,103.3426 92.1368193,103.922682 92.1368193,104.733413 C92.1368193,105.558121 91.9411287,106.152182 91.5497417,106.515613 C91.1583546,106.879044 90.5153712,107.060756 89.6207722,107.060756 L82.3661697,107.060756 C81.4436145,107.060756 80.7831587,106.879044 80.3847826,106.515613 C79.9864065,106.152182 79.7872214,105.558121 79.7872214,104.733413 C79.7872214,103.992573 79.9759231,103.429962 80.353332,103.045564 C80.730741,102.661166 81.282868,102.46897 82.0097297,102.46897 L82.3661697,102.46897 L82.3661697,86.8275441 L82.0097297,86.8275441 C81.2968461,86.8275441 80.7482136,86.628359 80.3638155,86.2299829 C79.9794175,85.8316068 79.7872214,85.2620074 79.7872214,84.5211676 C79.7872214,83.7104372 79.989901,83.119871 80.3952661,82.7494511 C80.8006313,82.3790312 81.4575926,82.193824 82.3661697,82.193824 L91.0255652,82.193824 C94.450202,82.193824 97.0396079,82.8507853 98.7938606,84.1647276 C100.548113,85.4786699 101.425227,87.414609 101.425227,89.972603 C101.425227,92.6703781 100.551608,94.7111515 98.8043442,96.0949843 C97.0570805,97.4788171 94.4641801,98.1707232 91.0255652,98.1707232 L87.4611651,98.1707232 Z M87.4611651,86.8275441 L87.4611651,93.4531348 L90.4384875,93.4531348 C92.0879044,93.4531348 93.328443,93.1735768 94.1601405,92.6144525 C94.9918381,92.0553281 95.4076806,91.2166541 95.4076806,90.0984053 C95.4076806,89.0500471 94.9778602,88.2428234 94.1182064,87.67671 C93.2585527,87.1105966 92.031992,86.8275441 90.4384875,86.8275441 L87.4611651,86.8275441 Z M114.727851,107.396229 L113.092421,109.03166 C113.69348,108.835966 114.284046,108.689198 114.864137,108.591352 C115.444229,108.493505 116.013828,108.444582 116.572953,108.444582 C117.677223,108.444582 118.840883,108.608823 120.063968,108.937308 
C121.287053,109.265794 122.031376,109.430034 122.29696,109.430034 C122.744259,109.430034 123.327837,109.279772 124.047709,108.979242 C124.767582,108.678713 125.253314,108.52845 125.50492,108.52845 C126.02211,108.52845 126.45193,108.727636 126.794394,109.126012 C127.136858,109.524388 127.308087,110.024098 127.308087,110.625156 C127.308087,111.421909 126.836333,112.099837 125.892811,112.658961 C124.949288,113.218086 123.792617,113.497643 122.422762,113.497643 C121.486229,113.497643 120.28413,113.277492 118.816428,112.837181 C117.348727,112.396871 116.286406,112.176719 115.629435,112.176719 C114.636989,112.176719 113.518757,112.449288 112.274706,112.994434 C111.030654,113.53958 110.261869,113.812149 109.968329,113.812149 C109.36727,113.812149 108.857077,113.612964 108.437734,113.214588 C108.01839,112.816212 107.808722,112.337469 107.808722,111.778345 C107.808722,111.386958 107.941512,110.971115 108.207096,110.530805 C108.47268,110.090494 108.94094,109.520895 109.611889,108.821989 L111.729562,106.683349 C109.395218,105.830685 107.536157,104.29661 106.152324,102.08108 C104.768491,99.8655494 104.076585,97.3180772 104.076585,94.4385866 C104.076585,90.6365409 105.180839,87.5299526 107.389381,85.1187288 C109.597922,82.7075049 112.442425,81.501911 115.922974,81.501911 C119.389545,81.501911 122.227059,82.7109994 124.4356,85.1292123 C126.644141,87.5474252 127.748395,90.650519 127.748395,94.4385866 C127.748395,98.2126762 126.65113,101.322759 124.456567,103.768928 C122.262004,106.215097 119.480402,107.438163 116.111677,107.438163 C115.888028,107.438163 115.660887,107.434669 115.430248,107.42768 C115.199609,107.420691 114.965479,107.410207 114.727851,107.396229 L114.727851,107.396229 Z M115.922974,86.3662688 C114.119798,86.3662688 112.704535,87.0931196 111.677144,88.546843 C110.649753,90.0005663 110.136065,92.0203728 110.136065,94.6063231 C110.136065,97.1922733 110.653248,99.2190688 111.687628,100.68677 C112.722008,102.154472 114.133776,102.888312 115.922974,102.888312 C117.72615,102.888312 119.144907,102.157966 120.179287,100.697254 C121.213667,99.2365414 121.730849,97.2062514 121.730849,94.6063231 C121.730849,92.0203728 121.217161,90.0005663 120.18977,88.546843 C119.162379,87.0931196 117.740128,86.3662688 115.922974,86.3662688 L115.922974,86.3662688 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/mono-line": {
            "title": "$:/core/images/mono-line",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mono-line tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60.4374591,84.522627 L61.3450888,84.522627 C63.2671377,84.522627 64.7264493,85.0120303 65.7230673,85.9908515 C66.7196852,86.9696727 67.2179868,88.4022896 67.2179868,90.288745 C67.2179868,92.3887615 66.6929905,93.9014625 65.6429823,94.8268935 C64.5929741,95.7523244 62.857817,96.215033 60.4374591,96.215033 L44.3670747,96.215033 C41.9111232,96.215033 40.1670679,95.7612227 39.1348565,94.8535884 C38.102645,93.9459542 37.586547,92.424355 37.586547,90.288745 C37.586547,88.2243221 38.102645,86.747214 39.1348565,85.8573766 C40.1670679,84.9675391 41.9111232,84.522627 44.3670747,84.522627 L46.235724,84.522627 L44.0467348,78.2759992 L20.9822627,78.2759992 L18.6864935,84.522627 L20.5551429,84.522627 C22.9755008,84.522627 24.7106579,84.9764373 25.7606661,85.8840716 C26.8106743,86.7917058 27.3356705,88.2599156 27.3356705,90.288745 C27.3356705,92.3887615 26.8106743,93.9014625 25.7606661,94.8268935 C24.7106579,95.7523244 22.9755008,96.215033 20.5551429,96.215033 L6.78052766,96.215033 C4.32457622,96.215033 2.58052094,95.7523244 1.54830946,94.8268935 C0.516097994,93.9014625 0,92.3887615 0,90.288745 C0,88.4022896 0.498301511,86.9696727 1.49491948,85.9908515 C2.49153745,85.0120303 3.95084902,84.522627 5.87289797,84.522627 L6.78052766,84.522627 L21.0890427,44.6937008 L16.8178442,44.6937008 C14.3974863,44.6937008 12.6623292,44.2309922 11.612321,43.3055613 C10.5623128,42.3801303 10.0373165,40.8852258 10.0373165,38.8208028 C10.0373165,36.7207864 10.5623128,35.2080854 11.612321,34.2826544 C12.6623292,33.3572234 14.3974863,32.8945149 16.8178442,32.8945149 L36.8390873,32.8945149 C40.0069087,32.8945149 42.231469,34.6029772 43.512835,38.0199531 L43.512835,38.180123 L60.4374591,84.522627 Z M32.4611088,44.6937008 L24.7195615,67.224273 L40.2026561,67.224273 L32.4611088,44.6937008 Z M89.5058233,68.5590225 L89.5058233,84.8429669 L97.5143205,84.8429669 C103.173687,84.8429669 107.160099,84.22009 109.473676,82.9743176 C111.787254,81.7285451 112.944025,79.6463566 112.944025,76.7276897 C112.944025,73.7734293 111.840643,71.6734444 109.633846,70.4276719 C107.427049,69.1818994 103.565213,68.5590225 98.0482204,68.5590225 L89.5058233,68.5590225 Z M116.734714,62.6327346 C120.614405,64.0564746 123.461842,66.0051894 125.277111,68.4789376 C127.092379,70.9526857 128,74.1115614 128,77.9556593 C128,81.1946677 127.216955,84.1488838 125.650841,86.8183962 C124.084727,89.4879087 121.84237,91.676876 118.923703,93.385364 C117.215215,94.3819819 115.302093,95.1027395 113.18428,95.5476582 C111.066467,95.9925769 108.06776,96.215033 104.188068,96.215033 L99.7033098,96.215033 L76.3184979,96.215033 C73.9693269,96.215033 72.2875593,95.7523244 71.2731446,94.8268935 C70.2587299,93.9014625 69.7515301,92.3887615 69.7515301,90.288745 C69.7515301,88.4022896 70.2320352,86.9696727 71.1930596,85.9908515 C72.1540841,85.0120303 73.5600062,84.522627 75.4108682,84.522627 L76.3184979,84.522627 L76.3184979,44.6937008 L75.4108682,44.6937008 C73.5600062,44.6937008 72.1540841,44.1953993 71.1930596,43.1987813 C70.2320352,42.2021633 69.7515301,40.7428518 69.7515301,38.8208028 C69.7515301,36.7563799 70.2676281,35.2525771 71.2998396,34.3093494 C72.3320511,33.3661217 74.0049204,32.8945149 76.3184979,32.8945149 L100.877889,32.8945149 C108.388118,32.8945149 114.09189,34.3538264 117.989378,37.2724934 C121.886867,40.1911603 123.835581,44.4623161 123.835581,50.0860889 C123.835581,52.8623819 
123.239399,55.3093982 122.047017,57.4272114 C120.854635,59.5450246 119.083885,61.2801816 116.734714,62.6327346 L116.734714,62.6327346 Z M89.5058233,44.3733609 L89.5058233,57.8276363 L96.7134708,57.8276363 C101.091471,57.8276363 104.179161,57.3115383 105.976633,56.2793268 C107.774104,55.2471153 108.672827,53.50306 108.672827,51.0471086 C108.672827,48.7335312 107.863087,47.0428653 106.243583,45.9750604 C104.624078,44.9072554 101.999097,44.3733609 98.3685602,44.3733609 L89.5058233,44.3733609 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-button": {
            "title": "$:/core/images/new-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M56,72 L8.00697327,72 C3.59075293,72 0,68.418278 0,64 C0,59.5907123 3.58484404,56 8.00697327,56 L56,56 L56,8.00697327 C56,3.59075293 59.581722,0 64,0 C68.4092877,0 72,3.58484404 72,8.00697327 L72,56 L119.993027,56 C124.409247,56 128,59.581722 128,64 C128,68.4092877 124.415156,72 119.993027,72 L72,72 L72,119.993027 C72,124.409247 68.418278,128 64,128 C59.5907123,128 56,124.415156 56,119.993027 L56,72 L56,72 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-here-button": {
            "title": "$:/core/images/new-here-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-here-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n    \t<g transform=\"translate(52.233611, 64.389922) rotate(75.000000) translate(-52.233611, -64.389922) translate(-7.734417, 3.702450)\">\n\t        <path d=\"M18.9270186,45.959338 L18.9080585,49.6521741 C18.8884833,53.4648378 21.0574548,58.7482162 23.7526408,61.4434022 L78.5671839,116.257945 C81.2617332,118.952495 85.6348701,118.950391 88.3334363,116.251825 L115.863237,88.7220241 C118.555265,86.0299959 118.564544,81.6509578 115.869358,78.9557717 L61.0548144,24.1412286 C58.3602652,21.4466794 53.0787224,19.2788426 49.2595808,19.3006519 L25.9781737,19.4336012 C22.1633003,19.4553862 19.0471195,22.5673232 19.0275223,26.3842526 L18.9871663,34.2443819 C19.0818862,34.255617 19.1779758,34.2665345 19.2754441,34.2771502 C22.6891275,34.6489512 27.0485594,34.2348566 31.513244,33.2285542 C31.7789418,32.8671684 32.075337,32.5211298 32.4024112,32.1940556 C34.8567584,29.7397084 38.3789778,29.0128681 41.4406288,30.0213822 C41.5958829,29.9543375 41.7503946,29.8866669 41.9041198,29.8183808 L42.1110981,30.2733467 C43.1114373,30.6972371 44.0473796,31.3160521 44.8614145,32.1300869 C48.2842088,35.5528813 48.2555691,41.130967 44.7974459,44.5890903 C41.4339531,47.952583 36.0649346,48.0717177 32.6241879,44.9262969 C27.8170558,45.8919233 23.0726921,46.2881596 18.9270186,45.959338 Z\"></path>\n\t        <path d=\"M45.4903462,38.8768094 C36.7300141,42.6833154 26.099618,44.7997354 18.1909048,43.9383587 C7.2512621,42.7468685 1.50150083,35.8404432 4.66865776,24.7010202 C7.51507386,14.6896965 15.4908218,6.92103848 24.3842626,4.38423012 C34.1310219,1.60401701 42.4070208,6.15882777 42.4070209,16.3101169 L34.5379395,16.310117 C34.5379394,11.9285862 31.728784,10.3825286 26.5666962,11.8549876 C20.2597508,13.6540114 14.3453742,19.4148216 12.2444303,26.8041943 C10.4963869,32.9523565 12.6250796,35.5092726 19.0530263,36.2093718 C25.5557042,36.9176104 35.0513021,34.9907189 42.7038419,31.5913902 L42.7421786,31.6756595 C44.3874154,31.5384763 47.8846101,37.3706354 45.9274416,38.6772897 L45.9302799,38.6835285 C45.9166992,38.6895612 45.9031139,38.6955897 45.8895238,38.7016142 C45.8389288,38.7327898 45.7849056,38.7611034 45.7273406,38.7863919 C45.6506459,38.8200841 45.571574,38.8501593 45.4903462,38.8768094 Z\"></path>\n        </g>\n        <rect x=\"96\" y=\"80\" width=\"16\" height=\"48\" rx=\"8\"></rect>\n        <rect x=\"80\" y=\"96\" width=\"48\" height=\"16\" rx=\"8\"></rect>\n    </g>\n    </g>\n</svg>"
        },
        "$:/core/images/new-image-button": {
            "title": "$:/core/images/new-image-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-image-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M81.3619177,73.6270062 L97.1875317,46.2162388 C97.91364,44.9585822 97.4824378,43.3533085 96.2260476,42.6279312 L46.2162388,13.7547547 C44.9585822,13.0286463 43.3533085,13.4598485 42.6279312,14.7162388 L30.0575956,36.4886988 L40.0978909,31.2276186 C43.1404959,29.6333041 46.8692155,31.3421319 47.6479264,34.6877101 L51.2545483,52.3903732 L61.1353556,53.2399953 C63.2899974,53.4346096 65.1046382,54.9309951 65.706105,57.0091178 C65.7395572,57.1246982 65.8069154,57.3539875 65.9047035,57.6813669 C66.0696435,58.2335608 66.2581528,58.852952 66.4667073,59.5238092 C67.0618822,61.4383079 67.6960725,63.3742727 68.3393254,65.2021174 C68.5462918,65.7902259 68.7511789,66.3583016 68.953259,66.9034738 C69.5777086,68.5881157 70.1617856,70.0172008 70.6783305,71.110045 C70.9334784,71.6498566 71.1627732,72.0871602 71.4035746,72.5373068 C71.6178999,72.7492946 71.9508843,72.9623307 72.4151452,73.1586945 C73.5561502,73.6412938 75.1990755,73.899146 77.0720271,73.9171651 C77.9355886,73.9254732 78.7819239,73.8832103 79.5638842,73.8072782 C80.0123946,73.7637257 80.3172916,73.7224469 80.4352582,73.7027375 C80.7503629,73.6500912 81.0598053,73.6256267 81.3619177,73.6270062 L81.3619177,73.6270062 L81.3619177,73.6270062 L81.3619177,73.6270062 Z M37.4707881,2.64867269 C38.9217993,0.135447653 42.1388058,-0.723707984 44.6486727,0.725364314 L108.293614,37.4707881 C110.806839,38.9217993 111.665994,42.1388058 110.216922,44.6486727 L73.4714982,108.293614 C72.0204871,110.806839 68.8034805,111.665994 66.2936136,110.216922 L2.64867269,73.4714982 C0.135447653,72.0204871 -0.723707984,68.8034805 0.725364314,66.2936136 L37.4707881,2.64867269 L37.4707881,2.64867269 L37.4707881,2.64867269 L37.4707881,2.64867269 Z M80.3080975,53.1397764 C82.8191338,54.5895239 86.0299834,53.7291793 87.4797308,51.218143 C88.9294783,48.7071068 88.0691338,45.4962571 85.5580975,44.0465097 C83.0470612,42.5967622 79.8362116,43.4571068 78.3864641,45.968143 C76.9367166,48.4791793 77.7970612,51.6900289 80.3080975,53.1397764 L80.3080975,53.1397764 L80.3080975,53.1397764 L80.3080975,53.1397764 Z M96,112 L88.0070969,112 C83.5881712,112 80,108.418278 80,104 C80,99.5907123 83.5848994,96 88.0070969,96 L96,96 L96,88.0070969 C96,83.5881712 99.581722,80 104,80 C108.409288,80 112,83.5848994 112,88.0070969 L112,96 L119.992903,96 C124.411829,96 128,99.581722 128,104 C128,108.409288 124.415101,112 119.992903,112 L112,112 L112,119.992903 C112,124.411829 108.418278,128 104,128 C99.5907123,128 96,124.415101 96,119.992903 L96,112 L96,112 Z M33.3471097,51.7910932 C40.7754579,59.7394511 42.3564368,62.4818351 40.7958321,65.1848818 C39.2352273,67.8879286 26.9581062,62.8571718 24.7019652,66.7649227 C22.4458242,70.6726735 23.7947046,70.0228006 22.2648667,72.6725575 L41.9944593,84.0634431 C41.9944593,84.0634431 36.3904568,75.8079231 37.7602356,73.4353966 C40.2754811,69.0788636 46.5298923,72.1787882 48.1248275,69.4162793 C50.538989,65.234829 43.0222016,59.7770885 33.3471097,51.7910932 L33.3471097,51.7910932 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-journal-button": {
            "title": "$:/core/images/new-journal-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-journal-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M102.545455,112.818182 L102.545455,124.636364 L102.545455,124.636364 L102.545455,124.636364 C102.545455,125.941761 103.630828,127 104.969697,127 L111.030303,127 C112.369172,127 113.454545,125.941761 113.454545,124.636364 L113.454545,112.818182 L125.575758,112.818182 C126.914626,112.818182 128,111.759982 128,110.454545 L128,104.545455 C128,103.240018 126.914626,102.181818 125.575758,102.181818 L113.454545,102.181818 L113.454545,90.3636364 C113.454545,89.0582 112.369172,88 111.030303,88 L104.969697,88 L104.969697,88 C103.630828,88 102.545455,89.0582 102.545455,90.3636364 L102.545455,102.181818 L90.4242424,102.181818 L90.4242424,102.181818 C89.0853705,102.181818 88,103.240018 88,104.545455 L88,110.454545 L88,110.454545 L88,110.454545 C88,111.759982 89.0853705,112.818182 90.4242424,112.818182 L102.545455,112.818182 Z\"></path>\n        <g transform=\"translate(59.816987, 64.316987) rotate(30.000000) translate(-59.816987, -64.316987) translate(20.316987, 12.816987)\">\n            <g transform=\"translate(0.000000, 0.000000)\">\n                <path d=\"M9.99631148,0 C4.4755011,0 -2.27373675e-13,4.48070044 -2.27373675e-13,9.99759461 L-2.27373675e-13,91.6128884 C-2.27373675e-13,97.1344074 4.46966773,101.610483 9.99631148,101.610483 L68.9318917,101.610483 C74.4527021,101.610483 78.9282032,97.1297826 78.9282032,91.6128884 L78.9282032,9.99759461 C78.9282032,4.47607557 74.4585355,0 68.9318917,0 L9.99631148,0 Z M20.8885263,26 C24.2022348,26 26.8885263,23.3137085 26.8885263,20 C26.8885263,16.6862915 24.2022348,14 20.8885263,14 C17.5748178,14 14.8885263,16.6862915 14.8885263,20 C14.8885263,23.3137085 17.5748178,26 20.8885263,26 Z M57.3033321,25.6783342 C60.6170406,25.6783342 63.3033321,22.9920427 63.3033321,19.6783342 C63.3033321,16.3646258 60.6170406,13.6783342 57.3033321,13.6783342 C53.9896236,13.6783342 51.3033321,16.3646258 51.3033321,19.6783342 C51.3033321,22.9920427 53.9896236,25.6783342 57.3033321,25.6783342 Z\"></path>\n                <text font-family=\"Helvetica\" font-size=\"47.1724138\" font-weight=\"bold\" fill=\"#FFFFFF\">\n                    <tspan x=\"42\" y=\"77.4847912\" text-anchor=\"middle\"><<now \"DD\">></tspan>\n                </text>\n            </g>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/opacity": {
            "title": "$:/core/images/opacity",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-opacity tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M102.361773,65 C101.833691,67.051742 101.183534,69.0544767 100.419508,71 L82.5835324,71 C83.7602504,69.1098924 84.7666304,67.1027366 85.581205,65 L102.361773,65 Z M102.834311,63 C103.256674,61.0388326 103.568427,59.0365486 103.762717,57 L87.6555706,57 C87.3692052,59.0609452 86.9083652,61.0660782 86.2884493,63 L102.834311,63 Z M99.5852583,73 C98.6682925,75.0747721 97.6196148,77.0783056 96.4498253,79 L75.8124196,79 C77.8387053,77.2115633 79.6621163,75.1985844 81.2437158,73 L99.5852583,73 Z M95.1689122,81 C93.7449202,83.1155572 92.1695234,85.1207336 90.458251,87 L60.4614747,87 C65.1836162,85.86248 69.5430327,83.794147 73.3347255,81 L95.1689122,81 Z M87.6555706,47 L103.762717,47 C101.246684,20.6269305 79.0321807,0 52,0 C23.281193,0 0,23.281193 0,52 C0,77.2277755 17.9651296,98.2595701 41.8000051,103 L62.1999949,103 C67.8794003,101.870444 73.2255333,99.8158975 78.074754,97 L39,97 L39,95 L81.2493857,95 C83.8589242,93.2215015 86.2981855,91.2116653 88.5376609,89 L39,89 L39,87 L43.5385253,87 C27.7389671,83.1940333 16,68.967908 16,52 C16,32.117749 32.117749,16 52,16 C70.1856127,16 85.2217929,29.4843233 87.6555706,47 Z M87.8767787,49 L103.914907,49 C103.971379,49.9928025 104,50.9930589 104,52 C104,53.0069411 103.971379,54.0071975 103.914907,55 L87.8767787,55 C87.958386,54.0107999 88,53.0102597 88,52 C88,50.9897403 87.958386,49.9892001 87.8767787,49 Z\"></path>\n        <path d=\"M76,128 C104.718807,128 128,104.718807 128,76 C128,47.281193 104.718807,24 76,24 C47.281193,24 24,47.281193 24,76 C24,104.718807 47.281193,128 76,128 L76,128 Z M76,112 C95.882251,112 112,95.882251 112,76 C112,56.117749 95.882251,40 76,40 C56.117749,40 40,56.117749 40,76 C40,95.882251 56.117749,112 76,112 L76,112 Z\"></path>\n        <path d=\"M37,58 L90,58 L90,62 L37,62 L37,58 L37,58 Z M40,50 L93,50 L93,54 L40,54 L40,50 L40,50 Z M40,42 L93,42 L93,46 L40,46 L40,42 L40,42 Z M32,66 L85,66 L85,70 L32,70 L32,66 L32,66 Z M30,74 L83,74 L83,78 L30,78 L30,74 L30,74 Z M27,82 L80,82 L80,86 L27,86 L27,82 L27,82 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/open-window": {
            "title": "$:/core/images/open-window",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-open-window tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M16,112 L104.993898,112 C108.863261,112 112,115.590712 112,120 C112,124.418278 108.858091,128 104.993898,128 L7.00610161,128 C3.13673853,128 0,124.409288 0,120 C0,119.998364 4.30952878e-07,119.996727 1.29273572e-06,119.995091 C4.89579306e-07,119.993456 0,119.99182 0,119.990183 L0,24.0098166 C0,19.586117 3.59071231,16 8,16 C12.418278,16 16,19.5838751 16,24.0098166 L16,112 Z\"></path>\n        <path d=\"M96,43.1959595 L96,56 C96,60.418278 99.581722,64 104,64 C108.418278,64 112,60.418278 112,56 L112,24 C112,19.5907123 108.415101,16 103.992903,16 L72.0070969,16 C67.5881712,16 64,19.581722 64,24 C64,28.4092877 67.5848994,32 72.0070969,32 L84.5685425,32 L48.2698369,68.2987056 C45.1421332,71.4264093 45.1434327,76.4904296 48.267627,79.614624 C51.3854642,82.7324612 56.4581306,82.7378289 59.5835454,79.6124141 L96,43.1959595 Z M32,7.9992458 C32,3.58138434 35.5881049,0 39.9992458,0 L120.000754,0 C124.418616,0 128,3.5881049 128,7.9992458 L128,88.0007542 C128,92.4186157 124.411895,96 120.000754,96 L39.9992458,96 C35.5813843,96 32,92.4118951 32,88.0007542 L32,7.9992458 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/options-button": {
            "title": "$:/core/images/options-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-options-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M110.48779,76.0002544 C109.354214,80.4045063 107.611262,84.5641217 105.354171,88.3838625 L105.354171,88.3838625 L112.07833,95.1080219 C115.20107,98.2307613 115.210098,103.299824 112.089164,106.420759 L106.420504,112.089418 C103.301049,115.208874 98.2346851,115.205502 95.1077675,112.078585 L88.3836082,105.354425 C84.5638673,107.611516 80.4042519,109.354468 76,110.488045 L76,110.488045 L76,119.993281 C76,124.409501 72.4220153,128.000254 68.0083475,128.000254 L59.9916525,128.000254 C55.5800761,128.000254 52,124.41541 52,119.993281 L52,110.488045 C47.5957481,109.354468 43.4361327,107.611516 39.6163918,105.354425 L32.8922325,112.078585 C29.7694931,115.201324 24.7004301,115.210353 21.5794957,112.089418 L15.9108363,106.420759 C12.7913807,103.301303 12.7947522,98.2349395 15.9216697,95.1080219 L22.6458291,88.3838625 C20.3887383,84.5641217 18.6457859,80.4045063 17.5122098,76.0002544 L8.00697327,76.0002544 C3.59075293,76.0002544 2.19088375e-16,72.4222697 4.89347582e-16,68.0086019 L9.80228577e-16,59.9919069 C1.25035972e-15,55.5803305 3.58484404,52.0002544 8.00697327,52.0002544 L17.5122098,52.0002544 C18.6457859,47.5960025 20.3887383,43.4363871 22.6458291,39.6166462 L15.9216697,32.8924868 C12.7989304,29.7697475 12.7899019,24.7006845 15.9108363,21.5797501 L21.5794957,15.9110907 C24.6989513,12.7916351 29.7653149,12.7950065 32.8922325,15.9219241 L39.6163918,22.6460835 C43.4361327,20.3889927 47.5957481,18.6460403 52,17.5124642 L52,8.00722764 C52,3.5910073 55.5779847,0.000254375069 59.9916525,0.000254375069 L68.0083475,0.000254375069 C72.4199239,0.000254375069 76,3.58509841 76,8.00722764 L76,17.5124642 C80.4042519,18.6460403 84.5638673,20.3889927 88.3836082,22.6460835 L95.1077675,15.9219241 C98.2305069,12.7991848 103.29957,12.7901562 106.420504,15.9110907 L112.089164,21.5797501 C115.208619,24.6992057 115.205248,29.7655693 112.07833,32.8924868 L105.354171,39.6166462 L105.354171,39.6166462 C107.611262,43.4363871 109.354214,47.5960025 110.48779,52.0002544 L119.993027,52.0002544 C124.409247,52.0002544 128,55.5782391 128,59.9919069 L128,68.0086019 C128,72.4201783 124.415156,76.0002544 119.993027,76.0002544 L110.48779,76.0002544 L110.48779,76.0002544 Z M64,96.0002544 C81.673112,96.0002544 96,81.6733664 96,64.0002544 C96,46.3271424 81.673112,32.0002544 64,32.0002544 C46.326888,32.0002544 32,46.3271424 32,64.0002544 C32,81.6733664 46.326888,96.0002544 64,96.0002544 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/paint": {
            "title": "$:/core/images/paint",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-paint tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M83.5265806,76.1907935 C90.430962,69.2864121 91.8921169,59.0000433 87.9100453,50.6642209 L125.812763,12.7615036 C128.732035,9.84223095 128.72611,5.10322984 125.812796,2.18991592 C122.893542,-0.729338085 118.161775,-0.730617045 115.241209,2.18994966 L77.3384914,40.092667 C69.002669,36.1105954 58.7163002,37.5717503 51.8119188,44.4761317 L83.5265806,76.1907935 L83.5265806,76.1907935 L83.5265806,76.1907935 L83.5265806,76.1907935 Z M80.8836921,78.8336819 L49.1690303,47.1190201 C49.1690303,47.1190201 8.50573364,81.242543 0,80.2820711 C0,80.2820711 3.78222974,85.8744423 6.82737483,88.320684 C20.8514801,82.630792 44.1526049,63.720771 44.1526049,63.720771 L44.8144806,64.3803375 C44.8144806,64.3803375 19.450356,90.2231043 9.18040433,92.0477601 C10.4017154,93.4877138 13.5343883,96.1014812 15.4269991,97.8235871 C20.8439164,96.3356979 50.1595367,69.253789 50.1595367,69.253789 L50.8214124,69.9133555 L18.4136144,100.936036 L23.6993903,106.221812 L56.1060358,75.2002881 L56.7679115,75.8598546 C56.7679115,75.8598546 28.9040131,106.396168 28.0841366,108.291555 C28.0841366,108.291555 34.1159238,115.144621 35.6529617,116.115796 C36.3545333,113.280171 63.5365402,82.6307925 63.5365402,82.6307925 L64.1984159,83.290359 C64.1984159,83.290359 43.6013016,107.04575 39.2343772,120.022559 C42.443736,123.571575 46.7339155,125.159692 50.1595362,126.321151 C47.9699978,114.504469 80.8836921,78.8336819 80.8836921,78.8336819 L80.8836921,78.8336819 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/palette": {
            "title": "$:/core/images/palette",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-palette tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M80.2470434,39.1821571 C75.0645698,38.2680897 69.6261555,37.7814854 64.0193999,37.7814854 C28.6624616,37.7814854 0,57.1324214 0,81.0030106 C0,90.644534 4.67604329,99.5487133 12.5805659,106.738252 C23.5031767,91.1899067 26.3405471,72.3946229 36.8885698,63.5622337 C52.0716764,50.8486559 63.4268694,55.7343343 63.4268694,55.7343343 L80.2470434,39.1821571 Z M106.781666,48.8370714 C119.830962,56.749628 128.0388,68.229191 128.0388,81.0030106 C128.0388,90.3534932 128.557501,98.4142085 116.165191,106.082518 C105.367708,112.763955 112.341384,99.546808 104.321443,95.1851533 C96.3015017,90.8234987 84.3749007,96.492742 86.1084305,103.091059 C89.3087234,115.272303 105.529892,114.54645 92.4224435,119.748569 C79.3149955,124.950687 74.2201582,124.224536 64.0193999,124.224536 C56.1979176,124.224536 48.7040365,123.277578 41.7755684,121.544216 C51.620343,117.347916 69.6563669,109.006202 75.129737,102.088562 C82.7876655,92.4099199 87.3713218,80.0000002 83.3235694,72.4837191 C83.1303943,72.1250117 94.5392656,60.81569 106.781666,48.8370714 Z M1.13430476,123.866563 C0.914084026,123.867944 0.693884185,123.868637 0.473712455,123.868637 C33.9526848,108.928928 22.6351223,59.642592 59.2924543,59.6425917 C59.6085574,61.0606542 59.9358353,62.5865065 60.3541977,64.1372318 C34.4465025,59.9707319 36.7873124,112.168427 1.13429588,123.866563 L1.13430476,123.866563 Z M1.84669213,123.859694 C40.7185279,123.354338 79.9985412,101.513051 79.9985401,79.0466836 C70.7284906,79.0466835 65.9257264,75.5670082 63.1833375,71.1051511 C46.585768,64.1019718 32.81846,116.819636 1.84665952,123.859695 L1.84669213,123.859694 Z M67.1980193,59.8524981 C62.748213,63.9666823 72.0838429,76.2846822 78.5155805,71.1700593 C89.8331416,59.8524993 112.468264,37.2173758 123.785825,25.8998146 C135.103386,14.5822535 123.785825,3.26469247 112.468264,14.5822535 C101.150703,25.8998144 78.9500931,48.9868127 67.1980193,59.8524981 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/permalink-button": {
            "title": "$:/core/images/permalink-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-permalink-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M80.4834582,48 L73.0956761,80 L73.0956761,80 L47.5165418,80 L54.9043239,48 L80.4834582,48 Z M84.1773493,32 L89.8007299,7.64246248 C90.7941633,3.33942958 95.0918297,0.64641956 99.3968675,1.64031585 C103.693145,2.63218977 106.385414,6.93288901 105.390651,11.2416793 L100.598215,32 L104.000754,32 C108.411895,32 112,35.581722 112,40 C112,44.4092877 108.418616,48 104.000754,48 L96.9043239,48 L89.5165418,80 L104.000754,80 C108.411895,80 112,83.581722 112,88 C112,92.4092877 108.418616,96 104.000754,96 L85.8226507,96 L80.1992701,120.357538 C79.2058367,124.66057 74.9081703,127.35358 70.6031325,126.359684 C66.3068546,125.36781 63.6145865,121.067111 64.6093491,116.758321 L69.401785,96 L43.8226507,96 L38.1992701,120.357538 C37.2058367,124.66057 32.9081703,127.35358 28.6031325,126.359684 C24.3068546,125.36781 21.6145865,121.067111 22.6093491,116.758321 L27.401785,96 L23.9992458,96 C19.5881049,96 16,92.418278 16,88 C16,83.5907123 19.5813843,80 23.9992458,80 L31.0956761,80 L38.4834582,48 L23.9992458,48 C19.5881049,48 16,44.418278 16,40 C16,35.5907123 19.5813843,32 23.9992458,32 L42.1773493,32 L47.8007299,7.64246248 C48.7941633,3.33942958 53.0918297,0.64641956 57.3968675,1.64031585 C61.6931454,2.63218977 64.3854135,6.93288901 63.3906509,11.2416793 L58.598215,32 L84.1773493,32 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/permaview-button": {
            "title": "$:/core/images/permaview-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-permaview-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M81.4834582,48 L79.6365127,56 L79.6365127,56 L74.0573784,56 L75.9043239,48 L81.4834582,48 Z M85.1773493,32 L90.8007299,7.64246248 C91.7941633,3.33942958 96.0918297,0.64641956 100.396867,1.64031585 C104.693145,2.63218977 107.385414,6.93288901 106.390651,11.2416793 L101.598215,32 L104.000754,32 C108.411895,32 112,35.581722 112,40 C112,44.4092877 108.418616,48 104.000754,48 L97.9043239,48 L96.0573784,56 L104.000754,56 C108.411895,56 112,59.581722 112,64 C112,68.4092877 108.418616,72 104.000754,72 L92.3634873,72 L90.5165418,80 L104.000754,80 C108.411895,80 112,83.581722 112,88 C112,92.4092877 108.418616,96 104.000754,96 L86.8226507,96 L81.1992701,120.357538 C80.2058367,124.66057 75.9081703,127.35358 71.6031325,126.359684 C67.3068546,125.36781 64.6145865,121.067111 65.6093491,116.758321 L70.401785,96 L64.8226507,96 L59.1992701,120.357538 C58.2058367,124.66057 53.9081703,127.35358 49.6031325,126.359684 C45.3068546,125.36781 42.6145865,121.067111 43.6093491,116.758321 L48.401785,96 L42.8226507,96 L37.1992701,120.357538 C36.2058367,124.66057 31.9081703,127.35358 27.6031325,126.359684 C23.3068546,125.36781 20.6145865,121.067111 21.6093491,116.758321 L26.401785,96 L23.9992458,96 C19.5881049,96 16,92.418278 16,88 C16,83.5907123 19.5813843,80 23.9992458,80 L30.0956761,80 L31.9426216,72 L23.9992458,72 C19.5881049,72 16,68.418278 16,64 C16,59.5907123 19.5813843,56 23.9992458,56 L35.6365127,56 L37.4834582,48 L23.9992458,48 C19.5881049,48 16,44.418278 16,40 C16,35.5907123 19.5813843,32 23.9992458,32 L41.1773493,32 L46.8007299,7.64246248 C47.7941633,3.33942958 52.0918297,0.64641956 56.3968675,1.64031585 C60.6931454,2.63218977 63.3854135,6.93288901 62.3906509,11.2416793 L57.598215,32 L63.1773493,32 L68.8007299,7.64246248 C69.7941633,3.33942958 74.0918297,0.64641956 78.3968675,1.64031585 C82.6931454,2.63218977 85.3854135,6.93288901 84.3906509,11.2416793 L79.598215,32 L85.1773493,32 Z M53.9043239,48 L52.0573784,56 L57.6365127,56 L59.4834582,48 L53.9043239,48 Z M75.9426216,72 L74.0956761,80 L74.0956761,80 L68.5165418,80 L70.3634873,72 L75.9426216,72 L75.9426216,72 Z M48.3634873,72 L46.5165418,80 L52.0956761,80 L53.9426216,72 L48.3634873,72 L48.3634873,72 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/picture": {
            "title": "$:/core/images/picture",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-picture tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M112,68.2332211 L112,20.0027785 C112,17.7898769 110.207895,16 107.997221,16 L20.0027785,16 C17.7898769,16 16,17.792105 16,20.0027785 L16,58.312373 L25.2413115,43.7197989 C28.041793,39.297674 34.2643908,38.7118128 37.8410347,42.5335275 L56.0882845,63.1470817 L69.7748997,56.7400579 C72.766567,55.3552503 76.3013751,55.9473836 78.678437,58.2315339 C78.8106437,58.3585731 79.0742301,58.609836 79.4527088,58.9673596 C80.0910923,59.570398 80.8117772,60.2441563 81.598127,60.9705595 C83.8422198,63.043576 86.1541548,65.1151944 88.3956721,67.0372264 C89.1168795,67.6556396 89.8200801,68.2492007 90.5021258,68.8146755 C92.6097224,70.5620551 94.4693308,72.0029474 95.9836366,73.0515697 C96.7316295,73.5695379 97.3674038,73.9719282 98.0281481,74.3824999 C98.4724987,74.4989557 99.0742374,74.5263881 99.8365134,74.4317984 C101.709944,74.1993272 104.074502,73.2878514 106.559886,71.8846196 C107.705822,71.2376318 108.790494,70.5370325 109.764561,69.8410487 C110.323259,69.4418522 110.694168,69.1550757 110.834827,69.0391868 C111.210545,68.7296319 111.600264,68.4615815 112,68.2332211 L112,68.2332211 Z M0,8.00697327 C0,3.58484404 3.59075293,0 8.00697327,0 L119.993027,0 C124.415156,0 128,3.59075293 128,8.00697327 L128,119.993027 C128,124.415156 124.409247,128 119.993027,128 L8.00697327,128 C3.58484404,128 0,124.409247 0,119.993027 L0,8.00697327 L0,8.00697327 Z M95,42 C99.418278,42 103,38.418278 103,34 C103,29.581722 99.418278,26 95,26 C90.581722,26 87,29.581722 87,34 C87,38.418278 90.581722,42 95,42 L95,42 Z M32,76 C47.8587691,80.8294182 52.0345556,83.2438712 52.0345556,88 C52.0345556,92.7561288 32,95.4712486 32,102.347107 C32,109.222965 33.2849191,107.337637 33.2849191,112 L67.999999,112 C67.999999,112 54.3147136,105.375255 54.3147136,101.200691 C54.3147136,93.535181 64.9302432,92.860755 64.9302432,88 C64.9302432,80.6425555 50.8523779,79.167282 32,76 L32,76 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-language": {
            "title": "$:/core/images/plugin-generic-language",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M61.2072232,68.1369825 C56.8829239,70.9319564 54.2082892,74.793177 54.2082892,79.0581634 C54.2082892,86.9638335 63.3980995,93.4821994 75.2498076,94.3940006 C77.412197,98.2964184 83.8475284,101.178858 91.5684735,101.403106 C86.4420125,100.27851 82.4506393,97.6624107 80.9477167,94.3948272 C92.8046245,93.4861461 102,86.9662269 102,79.0581634 C102,70.5281905 91.3014611,63.6132813 78.1041446,63.6132813 C71.5054863,63.6132813 65.5315225,65.3420086 61.2072232,68.1369825 Z M74.001066,53.9793443 C69.6767667,56.7743182 63.7028029,58.5030456 57.1041446,58.5030456 C54.4851745,58.5030456 51.9646095,58.2307276 49.6065315,57.7275105 C46.2945155,59.9778212 41.2235699,61.4171743 35.5395922,61.4171743 C35.4545771,61.4171743 35.3696991,61.4168523 35.2849622,61.4162104 C39.404008,60.5235193 42.7961717,58.6691298 44.7630507,56.286533 C37.8379411,53.5817651 33.2082892,48.669413 33.2082892,43.0581634 C33.2082892,34.5281905 43.9068281,27.6132812 57.1041446,27.6132812 C70.3014611,27.6132812 81,34.5281905 81,43.0581634 C81,47.3231498 78.3253653,51.1843704 74.001066,53.9793443 Z M64,0 L118.5596,32 L118.5596,96 L64,128 L9.44039956,96 L9.44039956,32 L64,0 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-plugin": {
            "title": "$:/core/images/plugin-generic-plugin",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M40.3972881,76.4456988 L40.3972881,95.3404069 L54.5170166,95.3404069 L54.5170166,95.3404069 C54.5165526,95.3385183 54.516089,95.3366295 54.515626,95.3347404 C54.6093153,95.3385061 54.7034848,95.3404069 54.7980982,95.3404069 C58.6157051,95.3404069 61.710487,92.245625 61.710487,88.4280181 C61.710487,86.6197822 61.01617,84.9737128 59.8795929,83.7418666 L59.8795929,83.7418666 C59.8949905,83.7341665 59.9104102,83.7265043 59.925852,83.7188798 C58.8840576,82.5086663 58.2542926,80.9336277 58.2542926,79.2114996 C58.2542926,75.3938927 61.3490745,72.2991108 65.1666814,72.2991108 C68.9842884,72.2991108 72.0790703,75.3938927 72.0790703,79.2114996 C72.0790703,81.1954221 71.2432806,82.9841354 69.9045961,84.2447446 L69.9045961,84.2447446 C69.9333407,84.2629251 69.9619885,84.281245 69.9905383,84.2997032 L69.9905383,84.2997032 C69.1314315,85.4516923 68.6228758,86.8804654 68.6228758,88.4280181 C68.6228758,91.8584969 71.1218232,94.7053153 74.3986526,95.2474079 C74.3913315,95.2784624 74.3838688,95.3094624 74.3762652,95.3404069 L95.6963988,95.3404069 L95.6963988,75.5678578 L95.6963988,75.5678578 C95.6466539,75.5808558 95.5967614,75.5934886 95.5467242,75.6057531 C95.5504899,75.5120637 95.5523907,75.4178943 95.5523907,75.3232809 C95.5523907,71.505674 92.4576088,68.4108921 88.6400019,68.4108921 C86.831766,68.4108921 85.1856966,69.105209 83.9538504,70.2417862 L83.9538504,70.2417862 C83.9461503,70.2263886 83.938488,70.2109688 83.9308636,70.1955271 C82.7206501,71.2373215 81.1456115,71.8670865 79.4234834,71.8670865 C75.6058765,71.8670865 72.5110946,68.7723046 72.5110946,64.9546976 C72.5110946,61.1370907 75.6058765,58.0423088 79.4234834,58.0423088 C81.4074059,58.0423088 83.1961192,58.8780985 84.4567284,60.2167829 L84.4567284,60.2167829 C84.4749089,60.1880383 84.4932288,60.1593906 84.511687,60.1308407 L84.511687,60.1308407 C85.6636761,60.9899475 87.0924492,61.4985032 88.6400019,61.4985032 C92.0704807,61.4985032 94.9172991,58.9995558 95.4593917,55.7227265 C95.538755,55.7414363 95.6177614,55.761071 95.6963988,55.7816184 L95.6963988,40.0412962 L74.3762652,40.0412962 L74.3762652,40.0412962 C74.3838688,40.0103516 74.3913315,39.9793517 74.3986526,39.9482971 L74.3986526,39.9482971 C71.1218232,39.4062046 68.6228758,36.5593862 68.6228758,33.1289073 C68.6228758,31.5813547 69.1314315,30.1525815 69.9905383,29.0005925 C69.9619885,28.9821342 69.9333407,28.9638143 69.9045961,28.9456339 C71.2432806,27.6850247 72.0790703,25.8963113 72.0790703,23.9123888 C72.0790703,20.0947819 68.9842884,17 65.1666814,17 C61.3490745,17 58.2542926,20.0947819 58.2542926,23.9123888 C58.2542926,25.6345169 58.8840576,27.2095556 59.925852,28.419769 L59.925852,28.419769 C59.9104102,28.4273935 59.8949905,28.4350558 59.8795929,28.4427558 C61.01617,29.674602 61.710487,31.3206715 61.710487,33.1289073 C61.710487,36.9465143 58.6157051,40.0412962 54.7980982,40.0412962 C54.7034848,40.0412962 54.6093153,40.0393953 54.515626,40.0356296 L54.515626,40.0356296 C54.516089,40.0375187 54.5165526,40.0394075 54.5170166,40.0412962 L40.3972881,40.0412962 L40.3972881,52.887664 L40.3972881,52.887664 C40.4916889,53.3430132 40.5412962,53.8147625 40.5412962,54.2980982 C40.5412962,58.1157051 37.4465143,61.210487 33.6289073,61.210487 C32.0813547,61.210487 30.6525815,60.7019313 29.5005925,59.8428245 C29.4821342,59.8713744 29.4638143,59.9000221 29.4456339,59.9287667 C28.1850247,58.5900823 26.3963113,57.7542926 24.4123888,57.7542926 C20.5947819,57.7542926 
17.5,60.8490745 17.5,64.6666814 C17.5,68.4842884 20.5947819,71.5790703 24.4123888,71.5790703 C26.134517,71.5790703 27.7095556,70.9493053 28.919769,69.9075109 L28.919769,69.9075109 C28.9273935,69.9229526 28.9350558,69.9383724 28.9427558,69.95377 C30.174602,68.8171928 31.8206715,68.1228758 33.6289073,68.1228758 C37.4465143,68.1228758 40.5412962,71.2176578 40.5412962,75.0352647 C40.5412962,75.5186004 40.4916889,75.9903496 40.3972881,76.4456988 Z M64,0 L118.5596,32 L118.5596,96 L64,128 L9.44039956,96 L9.44039956,32 L64,0 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-theme": {
            "title": "$:/core/images/plugin-generic-theme",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M29.4078519,91.4716406 L51.4693474,69.4101451 L51.4646675,69.4054652 C50.5969502,68.5377479 50.5929779,67.1348725 51.4693474,66.2585029 C52.3396494,65.3882009 53.7499654,65.3874786 54.6163097,66.2538229 L64.0805963,75.7181095 C64.9483136,76.5858268 64.9522859,77.9887022 64.0759163,78.8650718 C63.2056143,79.7353737 61.7952984,79.736096 60.9289541,78.8697517 L60.9242741,78.8650718 L60.9242741,78.8650718 L38.8627786,100.926567 C36.2518727,103.537473 32.0187578,103.537473 29.4078519,100.926567 C26.796946,98.3156614 26.796946,94.0825465 29.4078519,91.4716406 Z M60.8017407,66.3810363 C58.3659178,63.6765806 56.3370667,61.2899536 54.9851735,59.5123615 C48.1295381,50.4979488 44.671561,55.2444054 40.7586738,59.5123614 C36.8457866,63.7803174 41.789473,67.2384487 38.0759896,70.2532832 C34.3625062,73.2681177 34.5917646,74.3131575 28.3243876,68.7977024 C22.0570105,63.2822473 21.6235306,61.7636888 24.5005999,58.6166112 C27.3776691,55.4695337 29.7823103,60.4247912 35.6595047,54.8320442 C41.5366991,49.2392972 36.5996215,44.2825646 36.5996215,44.2825646 C36.5996215,44.2825646 48.8365511,19.267683 65.1880231,21.1152173 C81.5394952,22.9627517 59.0022276,18.7228947 53.3962199,38.3410355 C50.9960082,46.7405407 53.8429162,44.7613399 58.3941742,48.3090467 C59.7875202,49.3951602 64.4244828,52.7100463 70.1884353,56.9943417 L90.8648751,36.3179019 L92.4795866,31.5515482 L100.319802,26.8629752 L103.471444,30.0146174 L98.782871,37.8548326 L94.0165173,39.4695441 L73.7934912,59.6925702 C86.4558549,69.2403631 102.104532,81.8392557 102.104532,86.4016913 C102.104533,93.6189834 99.0337832,97.9277545 92.5695848,95.5655717 C87.8765989,93.8506351 73.8015497,80.3744087 63.8173444,69.668717 L60.9242741,72.5617873 L57.7726319,69.4101451 L60.8017407,66.3810363 L60.8017407,66.3810363 Z M63.9533761,1.42108547e-13 L118.512977,32 L118.512977,96 L63.9533761,128 L9.39377563,96 L9.39377563,32 L63.9533761,1.42108547e-13 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/preview-closed": {
            "title": "$:/core/images/preview-closed",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-preview-closed tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M0.0881363238,64 C-0.210292223,65.8846266 0.249135869,67.8634737 1.4664206,69.4579969 C16.2465319,88.8184886 39.1692554,100.414336 64,100.414336 C88.8307446,100.414336 111.753468,88.8184886 126.533579,69.4579969 C127.750864,67.8634737 128.210292,65.8846266 127.911864,64 C110.582357,78.4158332 88.3036732,87.0858436 64,87.0858436 C39.6963268,87.0858436 17.4176431,78.4158332 0.0881363238,64 Z\"></path>\n        <rect x=\"62\" y=\"96\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(80.000000, 101.000000) rotate(-5.000000) translate(-80.000000, -101.000000) \" x=\"78\" y=\"93\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(48.000000, 101.000000) rotate(-355.000000) translate(-48.000000, -101.000000) \" x=\"46\" y=\"93\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(32.000000, 96.000000) rotate(-350.000000) translate(-32.000000, -96.000000) \" x=\"30\" y=\"88\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(96.000000, 96.000000) rotate(-10.000000) translate(-96.000000, -96.000000) \" x=\"94\" y=\"88\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(112.000000, 88.000000) rotate(-20.000000) translate(-112.000000, -88.000000) \" x=\"110\" y=\"80\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(16.000000, 88.000000) rotate(-340.000000) translate(-16.000000, -88.000000) \" x=\"14\" y=\"80\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/preview-open": {
            "title": "$:/core/images/preview-open",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-preview-open tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64.1099282,99.5876785 C39.2791836,99.5876785 16.3564602,87.9918313 1.57634884,68.6313396 C-0.378878622,66.070184 -0.378878622,62.5174945 1.57634884,59.9563389 C16.3564602,40.5958472 39.2791836,29 64.1099282,29 C88.9406729,29 111.863396,40.5958472 126.643508,59.9563389 C128.598735,62.5174945 128.598735,66.070184 126.643508,68.6313396 C111.863396,87.9918313 88.9406729,99.5876785 64.1099282,99.5876785 Z M110.213805,67.5808331 C111.654168,66.0569335 111.654168,63.9430665 110.213805,62.4191669 C99.3257042,50.8995835 82.4391647,44 64.1470385,44 C45.8549124,44 28.9683729,50.8995835 18.0802717,62.4191669 C16.6399094,63.9430665 16.6399094,66.0569335 18.0802717,67.5808331 C28.9683729,79.1004165 45.8549124,86 64.1470385,86 C82.4391647,86 99.3257042,79.1004165 110.213805,67.5808331 Z\"></path>\n        <path d=\"M63.5,88 C76.4786916,88 87,77.4786916 87,64.5 C87,51.5213084 76.4786916,41 63.5,41 C50.5213084,41 40,51.5213084 40,64.5 C40,77.4786916 50.5213084,88 63.5,88 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/quote": {
            "title": "$:/core/images/quote",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-quote tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M51.2188077,117.712501 L51.2188077,62.1993386 L27.4274524,62.1993386 C27.4274524,53.3075754 29.1096526,45.797753 32.4741035,39.669646 C35.8385544,33.541539 42.0867267,28.9154883 51.2188077,25.7913554 L51.2188077,2 C43.7689521,2.96127169 36.8599155,5.18417913 30.4914905,8.668789 C24.1230656,12.1533989 18.6559149,16.5391352 14.0898743,21.8261295 C9.52383382,27.1131238 5.97919764,33.2411389 3.45585945,40.2103586 C0.932521268,47.1795784 -0.208971741,54.6293222 0.0313461819,62.5598136 L0.0313461819,117.712501 L51.2188077,117.712501 Z M128,117.712501 L128,62.1993386 L104.208645,62.1993386 C104.208645,53.3075754 105.890845,45.797753 109.255296,39.669646 C112.619747,33.541539 118.867919,28.9154883 128,25.7913554 L128,2 C120.550144,2.96127169 113.641108,5.18417913 107.272683,8.668789 C100.904258,12.1533989 95.4371072,16.5391352 90.8710666,21.8261295 C86.3050261,27.1131238 82.7603899,33.2411389 80.2370517,40.2103586 C77.7137136,47.1795784 76.5722206,54.6293222 76.8125385,62.5598136 L76.8125385,117.712501 L128,117.712501 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/refresh-button": {
            "title": "$:/core/images/refresh-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-refresh-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M106.369002,39.4325143 C116.529932,60.3119371 112.939592,86.1974934 95.5979797,103.539105 C73.7286194,125.408466 38.2713806,125.408466 16.4020203,103.539105 C-5.46734008,81.6697449 -5.46734008,46.2125061 16.4020203,24.3431458 C19.5262146,21.2189514 24.5915344,21.2189514 27.7157288,24.3431458 C30.8399231,27.4673401 30.8399231,32.5326599 27.7157288,35.6568542 C12.0947571,51.2778259 12.0947571,76.6044251 27.7157288,92.2253967 C43.3367004,107.846368 68.6632996,107.846368 84.2842712,92.2253967 C97.71993,78.7897379 99.5995262,58.1740623 89.9230597,42.729491 L83.4844861,54.9932839 C81.4307001,58.9052072 76.5945372,60.4115251 72.682614,58.3577391 C68.7706907,56.3039532 67.2643728,51.4677903 69.3181587,47.555867 L84.4354914,18.7613158 C86.4966389,14.8353707 91.3577499,13.3347805 95.273202,15.415792 L124.145886,30.7612457 C128.047354,32.8348248 129.52915,37.6785572 127.455571,41.5800249 C125.381992,45.4814927 120.53826,46.9632892 116.636792,44.8897102 L106.369002,39.4325143 Z M98.1470904,27.0648707 C97.9798954,26.8741582 97.811187,26.6843098 97.6409651,26.4953413 L98.6018187,26.1987327 L98.1470904,27.0648707 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/right-arrow": {
            "title": "$:/core/images/right-arrow",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-right-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M80.3563798,109.353315 C78.9238993,110.786918 76.9450203,111.675144 74.7592239,111.675144 L-4.40893546,111.675144 C-8.77412698,111.675144 -12.3248558,108.130732 -12.3248558,103.758478 C-12.3248558,99.3951199 -8.78077754,95.8418109 -4.40893546,95.8418109 L66.8418109,95.8418109 L66.8418109,24.5910645 C66.8418109,20.225873 70.3862233,16.6751442 74.7584775,16.6751442 C79.1218352,16.6751442 82.6751442,20.2192225 82.6751442,24.5910645 L82.6751442,103.759224 C82.6751442,105.941695 81.7891419,107.920575 80.3566508,109.353886 Z\" transform=\"translate(35.175144, 64.175144) rotate(-45.000000) translate(-35.175144, -64.175144) \"></path>\n</svg>"
        },
        "$:/core/images/save-button": {
            "title": "$:/core/images/save-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-save-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M120.78304,34.329058 C125.424287,43.1924006 128.049406,53.2778608 128.049406,63.9764502 C128.049406,99.3226742 99.3956295,127.97645 64.0494055,127.97645 C28.7031816,127.97645 0.0494055385,99.3226742 0.0494055385,63.9764502 C0.0494055385,28.6302262 28.7031816,-0.0235498012 64.0494055,-0.0235498012 C82.8568763,-0.0235498012 99.769563,8.08898558 111.479045,21.0056358 L114.159581,18.3250998 C117.289194,15.1954866 122.356036,15.1939641 125.480231,18.3181584 C128.598068,21.4359957 128.601317,26.5107804 125.473289,29.6388083 L120.78304,34.329058 Z M108.72451,46.3875877 C110.870571,51.8341374 112.049406,57.767628 112.049406,63.9764502 C112.049406,90.4861182 90.5590735,111.97645 64.0494055,111.97645 C37.5397375,111.97645 16.0494055,90.4861182 16.0494055,63.9764502 C16.0494055,37.4667822 37.5397375,15.9764502 64.0494055,15.9764502 C78.438886,15.9764502 91.3495036,22.308215 100.147097,32.3375836 L58.9411255,73.5435552 L41.975581,56.5780107 C38.8486152,53.4510448 33.7746915,53.4551552 30.6568542,56.5729924 C27.5326599,59.6971868 27.5372202,64.7670668 30.6618725,67.8917192 L53.279253,90.5090997 C54.8435723,92.073419 56.8951519,92.8541315 58.9380216,92.8558261 C60.987971,92.8559239 63.0389578,92.0731398 64.6049211,90.5071765 L108.72451,46.3875877 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/size": {
            "title": "$:/core/images/size",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-size tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M92.3431458,26 L83.1715729,35.1715729 C81.6094757,36.73367 81.6094757,39.26633 83.1715729,40.8284271 C84.73367,42.3905243 87.26633,42.3905243 88.8284271,40.8284271 L104.828427,24.8284271 C106.390524,23.26633 106.390524,20.73367 104.828427,19.1715729 L88.8284271,3.17157288 C87.26633,1.60947571 84.73367,1.60947571 83.1715729,3.17157288 C81.6094757,4.73367004 81.6094757,7.26632996 83.1715729,8.82842712 L92.3431457,18 L22,18 C19.790861,18 18,19.790861 18,22 L18,92.3431458 L8.82842712,83.1715729 C7.26632996,81.6094757 4.73367004,81.6094757 3.17157288,83.1715729 C1.60947571,84.73367 1.60947571,87.26633 3.17157288,88.8284271 L19.1715729,104.828427 C20.73367,106.390524 23.26633,106.390524 24.8284271,104.828427 L40.8284271,88.8284271 C42.3905243,87.26633 42.3905243,84.73367 40.8284271,83.1715729 C39.26633,81.6094757 36.73367,81.6094757 35.1715729,83.1715729 L26,92.3431458 L26,22 L22,26 L92.3431458,26 L92.3431458,26 Z M112,52 L112,116 L116,112 L52,112 C49.790861,112 48,113.790861 48,116 C48,118.209139 49.790861,120 52,120 L116,120 C118.209139,120 120,118.209139 120,116 L120,52 C120,49.790861 118.209139,48 116,48 C113.790861,48 112,49.790861 112,52 L112,52 Z\"></path>\n</svg>"
        },
        "$:/core/images/spiral": {
            "title": "$:/core/images/spiral",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-spiral tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M64.534 68.348c3.39 0 6.097-2.62 6.476-5.968l-4.755-.538 4.75.583c.377-3.07-1.194-6.054-3.89-7.78-2.757-1.773-6.34-2.01-9.566-.7-3.46 1.403-6.14 4.392-7.35 8.148l-.01.026c-1.3 4.08-.72 8.64 1.58 12.52 2.5 4.2 6.77 7.2 11.76 8.27 5.37 1.15 11.11-.05 15.83-3.31 5.04-3.51 8.46-9.02 9.45-15.3 1.05-6.7-.72-13.63-4.92-19.19l.02.02c-4.42-5.93-11.2-9.82-18.78-10.78-7.96-1.01-16.13 1.31-22.59 6.43-6.81 5.39-11.18 13.41-12.11 22.26-.98 9.27 1.87 18.65 7.93 26.02 6.32 7.69 15.6 12.56 25.74 13.48 10.54.96 21.15-2.42 29.45-9.4l.01-.01c8.58-7.25 13.94-17.78 14.86-29.21.94-11.84-2.96-23.69-10.86-32.9-8.19-9.5-19.95-15.36-32.69-16.27-13.16-.94-26.24 3.49-36.34 12.34l.01-.01c-10.41 9.08-16.78 22.1-17.68 36.15-.93 14.44 4.03 28.77 13.79 39.78 10.03 11.32 24.28 18.2 39.6 19.09 15.73.92 31.31-4.56 43.24-15.234 12.23-10.954 19.61-26.44 20.5-43.074.14-2.64-1.89-4.89-4.52-5.03-2.64-.14-4.89 1.88-5.03 4.52-.75 14.1-7 27.2-17.33 36.45-10.03 8.98-23.11 13.58-36.3 12.81-12.79-.75-24.67-6.48-33-15.89-8.07-9.11-12.17-20.94-11.41-32.827.74-11.52 5.942-22.15 14.43-29.54l.01-.01c8.18-7.17 18.74-10.75 29.35-9.998 10.21.726 19.6 5.41 26.11 12.96 6.24 7.273 9.32 16.61 8.573 25.894-.718 8.9-4.88 17.064-11.504 22.66l.01-.007c-6.36 5.342-14.44 7.92-22.425 7.19-7.604-.68-14.52-4.314-19.21-10.027-4.44-5.4-6.517-12.23-5.806-18.94.67-6.3 3.76-11.977 8.54-15.766 4.46-3.54 10.05-5.128 15.44-4.44 5.03.63 9.46 3.18 12.32 7.01l.02.024c2.65 3.5 3.75 7.814 3.1 11.92-.59 3.71-2.58 6.925-5.45 8.924-2.56 1.767-5.61 2.403-8.38 1.81-2.42-.516-4.42-1.92-5.53-3.79-.93-1.56-1.15-3.3-.69-4.75l-4.56-1.446L59.325 65c.36-1.12 1.068-1.905 1.84-2.22.25-.103.48-.14.668-.13.06.006.11.015.14.025.01 0 .01 0-.01-.01-.02-.015-.054-.045-.094-.088-.06-.064-.12-.145-.17-.244-.15-.29-.23-.678-.18-1.11l-.005.04c.15-1.332 1.38-2.523 3.035-2.523-2.65 0-4.79 2.144-4.79 4.787s2.14 4.785 4.78 4.785z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/stamp": {
            "title": "$:/core/images/stamp",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-stamp tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M49.7334301,64 L16.0098166,64 C11.5838751,64 8,67.5829053 8,72.002643 L8,74.4986785 L8,97 L120,97 L120,74.4986785 L120,72.002643 C120,67.5737547 116.413883,64 111.990183,64 L78.2665699,64 C76.502049,60.7519149 75.5,57.0311962 75.5,53.0769231 C75.5,46.6017951 78.1869052,40.7529228 82.5087769,36.5800577 C85.3313113,32.7688808 87,28.0549983 87,22.952183 C87,10.2760423 76.7025492,0 64,0 C51.2974508,0 41,10.2760423 41,22.952183 C41,28.0549983 42.6686887,32.7688808 45.4912231,36.5800577 C49.8130948,40.7529228 52.5,46.6017951 52.5,53.0769231 C52.5,57.0311962 51.497951,60.7519149 49.7334301,64 Z M8,104 L120,104 L120,112 L8,112 L8,104 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/star-filled": {
            "title": "$:/core/images/star-filled",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-star-filled tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M61.8361286,96.8228569 L99.1627704,124.110219 C101.883827,126.099427 105.541968,123.420868 104.505636,120.198072 L90.2895569,75.9887263 L89.0292911,79.8977279 L126.314504,52.5528988 C129.032541,50.5595011 127.635256,46.2255025 124.273711,46.2229134 L78.1610486,46.1873965 L81.4604673,48.6032923 L67.1773543,4.41589688 C66.1361365,1.19470104 61.6144265,1.19470104 60.5732087,4.41589688 L46.2900957,48.6032923 L49.5895144,46.1873965 L3.47685231,46.2229134 C0.115307373,46.2255025 -1.28197785,50.5595011 1.43605908,52.5528988 L38.7212719,79.8977279 L37.4610061,75.9887263 L23.2449266,120.198072 C22.2085954,123.420868 25.8667356,126.099427 28.5877926,124.110219 L65.9144344,96.8228569 L61.8361286,96.8228569 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-classic": {
            "title": "$:/core/images/storyview-classic",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-classic tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.5776607 16,23.9924054 L16,40.0075946 C16,44.4216782 19.5881049,48 23.9992458,48 L104.000754,48 C108.418616,48 112,44.4223393 112,40.0075946 L112,23.9924054 C112,19.5783218 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z M23.9992458,64 C19.5813843,64 16,67.5907123 16,72 C16,76.418278 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.4092877 112,72 C112,67.581722 108.411895,64 104.000754,64 L23.9992458,64 L23.9992458,64 Z M23.9992458,96 C19.5813843,96 16,99.5907123 16,104 C16,108.418278 19.5881049,112 23.9992458,112 L104.000754,112 C108.418616,112 112,108.409288 112,104 C112,99.581722 108.411895,96 104.000754,96 L23.9992458,96 L23.9992458,96 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-pop": {
            "title": "$:/core/images/storyview-pop",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-pop tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.5776607 16,23.9924054 L16,40.0075946 C16,44.4216782 19.5881049,48 23.9992458,48 L104.000754,48 C108.418616,48 112,44.4223393 112,40.0075946 L112,23.9924054 C112,19.5783218 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z M16.0098166,56 C11.586117,56 8,59.5776607 8,63.9924054 L8,80.0075946 C8,84.4216782 11.5838751,88 16.0098166,88 L111.990183,88 C116.413883,88 120,84.4223393 120,80.0075946 L120,63.9924054 C120,59.5783218 116.416125,56 111.990183,56 L16.0098166,56 L16.0098166,56 Z M23.9992458,96 C19.5813843,96 16,99.5907123 16,104 C16,108.418278 19.5881049,112 23.9992458,112 L104.000754,112 C108.418616,112 112,108.409288 112,104 C112,99.581722 108.411895,96 104.000754,96 L23.9992458,96 L23.9992458,96 Z M23.9992458,64 C19.5813843,64 16,67.5907123 16,72 C16,76.418278 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.4092877 112,72 C112,67.581722 108.411895,64 104.000754,64 L23.9992458,64 L23.9992458,64 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-zoomin": {
            "title": "$:/core/images/storyview-zoomin",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-zoomin tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.578055 16,24.0085154 L16,71.9914846 C16,76.4144655 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.421945 112,71.9914846 L112,24.0085154 C112,19.5855345 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/strikethrough": {
            "title": "$:/core/images/strikethrough",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-strikethrough tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M92.793842,38.7255689 L108.215529,38.7255689 C107.987058,31.985687 106.70193,26.1883331 104.360107,21.3333333 C102.018284,16.4783336 98.8197436,12.4516001 94.7643909,9.25301205 C90.7090382,6.05442399 85.9969032,3.71263572 80.6278447,2.22757697 C75.2587862,0.742518233 69.4328739,0 63.1499331,0 C57.552404,0 52.0977508,0.713959839 46.7858099,2.14190094 C41.473869,3.56984203 36.7331757,5.74027995 32.5635877,8.65327979 C28.3939997,11.5662796 25.0526676,15.2788708 22.5394913,19.7911647 C20.026315,24.3034585 18.7697456,29.6438781 18.7697456,35.8125837 C18.7697456,41.4101128 19.883523,46.0651309 22.1111111,49.7777778 C24.3386992,53.4904246 27.3087722,56.5176144 31.021419,58.8594378 C34.7340659,61.2012612 38.9321497,63.0861151 43.6157965,64.5140562 C48.2994433,65.9419973 53.068695,67.1985666 57.9236948,68.2838019 C62.7786945,69.3690371 67.5479462,70.4256977 72.231593,71.4538153 C76.9152398,72.4819329 81.1133237,73.8241773 84.8259705,75.480589 C88.5386174,77.1370007 91.5086903,79.2788802 93.7362784,81.9062918 C95.9638666,84.5337035 97.0776439,87.9607107 97.0776439,92.1874163 C97.0776439,96.6425926 96.1637753,100.298067 94.3360107,103.153949 C92.5082461,106.009831 90.109341,108.265944 87.1392236,109.922356 C84.1691061,111.578768 80.827774,112.749662 77.1151272,113.435074 C73.4024803,114.120485 69.7184476,114.463186 66.0629183,114.463186 C61.4935068,114.463186 57.0383974,113.892018 52.6974565,112.749665 C48.3565156,111.607312 44.5582492,109.836692 41.3025435,107.437751 C38.0468378,105.03881 35.4194656,101.983062 33.4203481,98.270415 C31.4212305,94.5577681 30.4216867,90.1312171 30.4216867,84.9906292 L15,84.9906292 C15,92.4159229 16.3422445,98.8415614 19.0267738,104.267738 C21.711303,109.693914 25.3667774,114.149023 29.9933066,117.633199 C34.6198357,121.117376 39.9888137,123.71619 46.1004016,125.429719 C52.2119895,127.143248 58.6947448,128 65.5488621,128 C71.1463912,128 76.7723948,127.343157 82.4270415,126.029451 C88.0816882,124.715745 93.1936407,122.602424 97.7630522,119.689424 C102.332464,116.776425 106.073613,113.006717 108.986613,108.380187 C111.899613,103.753658 113.356091,98.1847715 113.356091,91.6733601 C113.356091,85.6188899 112.242314,80.5926126 110.014726,76.5943775 C107.787137,72.5961424 104.817065,69.2833688 101.104418,66.6559572 C97.3917708,64.0285455 93.193687,61.9437828 88.5100402,60.4016064 C83.8263934,58.85943 79.0571416,57.5171855 74.2021419,56.3748327 C69.3471422,55.2324798 64.5778904,54.1758192 59.8942436,53.2048193 C55.2105968,52.2338193 51.012513,51.0058084 47.2998661,49.5207497 C43.5872193,48.0356909 40.6171463,46.1222786 38.3895582,43.7804552 C36.1619701,41.4386318 35.0481928,38.3828836 35.0481928,34.6131191 C35.0481928,30.6148841 35.8192694,27.273552 37.3614458,24.5890228 C38.9036222,21.9044935 40.9598265,19.762614 43.5301205,18.1633199 C46.1004145,16.5640259 49.041929,15.4216902 52.3547523,14.7362784 C55.6675757,14.0508667 59.0374661,13.708166 62.4645248,13.708166 C70.9179361,13.708166 77.8576257,15.6786952 83.2838019,19.6198126 C88.709978,23.56093 91.8799597,29.9294518 92.793842,38.7255689 L92.793842,38.7255689 Z\"></path>\n        <rect x=\"5\" y=\"54\" width=\"118\" height=\"16\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/subscript": {
            "title": "$:/core/images/subscript",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-subscript tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M2.27170276,16 L22.1825093,16 L43.8305003,49.6746527 L66.4138983,16 L85.1220387,16 L53.5854592,61.9685735 L87.3937414,111.411516 L67.0820462,111.411516 L43.295982,74.9306422 L19.1090291,111.411516 L0,111.411516 L33.8082822,61.9685735 L2.27170276,16 Z M127.910914,128.411516 L85.3276227,128.411516 C85.3870139,123.24448 86.6342108,118.730815 89.0692508,114.870386 C91.5042907,111.009956 94.8301491,107.654403 99.0469256,104.803624 C101.066227,103.318844 103.174584,101.878629 105.372059,100.482935 C107.569534,99.0872413 109.588805,97.5876355 111.429933,95.9840726 C113.271061,94.3805097 114.785514,92.6433426 115.973338,90.7725192 C117.161163,88.9016958 117.784761,86.7487964 117.844152,84.3137564 C117.844152,83.1853233 117.710524,81.9826691 117.443264,80.7057579 C117.176003,79.4288467 116.656338,78.2410402 115.884252,77.1423026 C115.112166,76.0435651 114.04314,75.123015 112.677142,74.3806248 C111.311144,73.6382345 109.529434,73.267045 107.331959,73.267045 C105.312658,73.267045 103.634881,73.6679297 102.298579,74.4697112 C100.962276,75.2714926 99.8932503,76.3702137 99.0914688,77.7659073 C98.2896874,79.161601 97.6957841,80.8096826 97.3097412,82.7102016 C96.9236982,84.6107206 96.7009845,86.6596869 96.6415933,88.857162 L86.4857457,88.857162 C86.4857457,85.4124713 86.9460207,82.2202411 87.8665846,79.2803758 C88.7871485,76.3405105 90.1679736,73.801574 92.0091014,71.6634901 C93.8502292,69.5254062 96.092214,67.8476295 98.7351233,66.6301095 C101.378033,65.4125895 104.451482,64.8038386 107.955564,64.8038386 C111.756602,64.8038386 114.933984,65.4274371 117.487807,66.6746527 C120.041629,67.9218683 122.105443,69.4957119 123.67931,71.3962309 C125.253178,73.2967499 126.366746,75.3605638 127.02005,77.5877345 C127.673353,79.8149053 128,81.9381095 128,83.9574109 C128,86.4518421 127.613963,88.7086746 126.841877,90.727976 C126.069791,92.7472774 125.03046,94.6032252 123.723854,96.2958749 C122.417247,97.9885247 120.932489,99.5475208 119.269534,100.97291 C117.60658,102.398299 115.884261,103.734582 114.102524,104.981797 C112.320788,106.229013 110.539078,107.416819 108.757341,108.545253 C106.975605,109.673686 105.327523,110.802102 103.813047,111.930535 C102.298571,113.058968 100.977136,114.231927 99.8487031,115.449447 C98.7202699,116.666967 97.9481956,117.958707 97.5324571,119.324705 L127.910914,119.324705 L127.910914,128.411516 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/superscript": {
            "title": "$:/core/images/superscript",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-superscript tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M2.27170276,16 L22.1825093,16 L43.8305003,49.6746527 L66.4138983,16 L85.1220387,16 L53.5854592,61.9685735 L87.3937414,111.411516 L67.0820462,111.411516 L43.295982,74.9306422 L19.1090291,111.411516 L0,111.411516 L33.8082822,61.9685735 L2.27170276,16 Z M127.910914,63.4115159 L85.3276227,63.4115159 C85.3870139,58.2444799 86.6342108,53.7308149 89.0692508,49.8703857 C91.5042907,46.0099565 94.8301491,42.654403 99.0469256,39.8036245 C101.066227,38.318844 103.174584,36.8786285 105.372059,35.4829349 C107.569534,34.0872413 109.588805,32.5876355 111.429933,30.9840726 C113.271061,29.3805097 114.785514,27.6433426 115.973338,25.7725192 C117.161163,23.9016958 117.784761,21.7487964 117.844152,19.3137564 C117.844152,18.1853233 117.710524,16.9826691 117.443264,15.7057579 C117.176003,14.4288467 116.656338,13.2410402 115.884252,12.1423026 C115.112166,11.0435651 114.04314,10.123015 112.677142,9.38062477 C111.311144,8.63823453 109.529434,8.26704499 107.331959,8.26704499 C105.312658,8.26704499 103.634881,8.6679297 102.298579,9.46971115 C100.962276,10.2714926 99.8932503,11.3702137 99.0914688,12.7659073 C98.2896874,14.161601 97.6957841,15.8096826 97.3097412,17.7102016 C96.9236982,19.6107206 96.7009845,21.6596869 96.6415933,23.857162 L86.4857457,23.857162 C86.4857457,20.4124713 86.9460207,17.2202411 87.8665846,14.2803758 C88.7871485,11.3405105 90.1679736,8.80157397 92.0091014,6.6634901 C93.8502292,4.52540622 96.092214,2.84762946 98.7351233,1.63010947 C101.378033,0.412589489 104.451482,-0.196161372 107.955564,-0.196161372 C111.756602,-0.196161372 114.933984,0.427437071 117.487807,1.67465266 C120.041629,2.92186826 122.105443,4.49571195 123.67931,6.39623095 C125.253178,8.29674995 126.366746,10.3605638 127.02005,12.5877345 C127.673353,14.8149053 128,16.9381095 128,18.9574109 C128,21.4518421 127.613963,23.7086746 126.841877,25.727976 C126.069791,27.7472774 125.03046,29.6032252 123.723854,31.2958749 C122.417247,32.9885247 120.932489,34.5475208 119.269534,35.97291 C117.60658,37.3982993 115.884261,38.7345816 114.102524,39.9817972 C112.320788,41.2290128 110.539078,42.4168194 108.757341,43.5452525 C106.975605,44.6736857 105.327523,45.8021019 103.813047,46.9305351 C102.298571,48.0589682 100.977136,49.2319272 99.8487031,50.4494472 C98.7202699,51.6669672 97.9481956,52.9587068 97.5324571,54.3247048 L127.910914,54.3247048 L127.910914,63.4115159 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/tag-button": {
            "title": "$:/core/images/tag-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-tag-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M18.1643182,47.6600756 L18.1677196,51.7651887 C18.1708869,55.5878829 20.3581578,60.8623899 23.0531352,63.5573673 L84.9021823,125.406414 C87.5996731,128.103905 91.971139,128.096834 94.6717387,125.396234 L125.766905,94.3010679 C128.473612,91.5943612 128.472063,87.2264889 125.777085,84.5315115 L63.9280381,22.6824644 C61.2305472,19.9849735 55.9517395,17.801995 52.1318769,17.8010313 L25.0560441,17.7942007 C21.2311475,17.7932358 18.1421354,20.8872832 18.1452985,24.7049463 L18.1535504,34.6641936 C18.2481119,34.6754562 18.3439134,34.6864294 18.4409623,34.6971263 C22.1702157,35.1081705 26.9295004,34.6530132 31.806204,33.5444844 C32.1342781,33.0700515 32.5094815,32.6184036 32.9318197,32.1960654 C35.6385117,29.4893734 39.5490441,28.718649 42.94592,29.8824694 C43.0432142,29.8394357 43.1402334,29.7961748 43.2369683,29.7526887 L43.3646982,30.0368244 C44.566601,30.5115916 45.6933052,31.2351533 46.6655958,32.2074439 C50.4612154,36.0030635 50.4663097,42.1518845 46.6769742,45.94122 C43.0594074,49.5587868 37.2914155,49.7181264 33.4734256,46.422636 C28.1082519,47.5454734 22.7987486,48.0186448 18.1643182,47.6600756 Z\"></path>\n        <path d=\"M47.6333528,39.5324628 L47.6562932,39.5834939 C37.9670934,43.9391617 26.0718874,46.3819521 17.260095,45.4107025 C5.27267473,44.0894301 -1.02778744,36.4307276 2.44271359,24.0779512 C5.56175386,12.9761516 14.3014034,4.36129832 24.0466405,1.54817001 C34.7269254,-1.53487574 43.7955833,3.51606438 43.7955834,14.7730751 L35.1728168,14.7730752 C35.1728167,9.91428944 32.0946059,8.19982862 26.4381034,9.83267419 C19.5270911,11.8276553 13.046247,18.2159574 10.7440788,26.4102121 C8.82861123,33.2280582 11.161186,36.0634845 18.2047888,36.8398415 C25.3302805,37.6252244 35.7353482,35.4884477 44.1208333,31.7188498 L44.1475077,31.7781871 C44.159701,31.7725635 44.1718402,31.7671479 44.1839238,31.7619434 C45.9448098,31.0035157 50.4503245,38.3109156 47.7081571,39.5012767 C47.6834429,39.512005 47.6585061,39.5223987 47.6333528,39.5324628 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/theme-button": {
            "title": "$:/core/images/theme-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-theme-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M55.854113,66.9453198 C54.3299482,65.1432292 53.0133883,63.518995 51.9542746,62.1263761 C40.8899947,47.578055 35.3091807,55.2383404 28.9941893,62.1263758 C22.6791979,69.0144112 30.6577916,74.5954741 24.6646171,79.4611023 C18.6714426,84.3267304 19.0414417,86.0133155 8.92654943,77.1119468 C-1.18834284,68.2105781 -1.88793412,65.7597832 2.7553553,60.6807286 C7.39864472,55.601674 11.2794845,63.5989423 20.7646627,54.5728325 C30.2498409,45.5467226 22.2819131,37.5470737 22.2819131,37.5470737 C22.2819131,37.5470737 42.0310399,-2.82433362 68.4206088,0.157393922 C94.8101776,3.13912147 58.4373806,-3.70356506 49.3898693,27.958066 C45.5161782,41.5139906 50.1107906,38.3197672 57.4560458,44.0453955 C59.1625767,45.3756367 63.8839488,48.777453 70.127165,53.3625321 C63.9980513,59.2416709 58.9704753,64.0315459 55.854113,66.9453198 Z M67.4952439,79.8919946 C83.5082212,96.9282402 105.237121,117.617674 112.611591,120.312493 C123.044132,124.12481 128.000001,117.170903 128,105.522947 C127.999999,98.3705516 104.170675,78.980486 84.0760493,63.7529565 C76.6683337,70.9090328 70.7000957,76.7055226 67.4952439,79.8919946 Z\"></path>\n        <path d=\"M58.2852966,138.232794 L58.2852966,88.3943645 C56.318874,88.3923153 54.7254089,86.7952906 54.7254089,84.8344788 C54.7254089,82.8684071 56.3175932,81.2745911 58.2890859,81.2745911 L79.6408336,81.2745911 C81.608998,81.2745911 83.2045105,82.8724076 83.2045105,84.8344788 C83.2045105,86.7992907 81.614366,88.3923238 79.6446228,88.3943645 L79.6446228,88.3943646 L79.6446228,138.232794 C79.6446228,144.131009 74.8631748,148.912457 68.9649597,148.912457 C63.0667446,148.912457 58.2852966,144.131009 58.2852966,138.232794 Z M65.405072,-14.8423767 L72.5248474,-14.8423767 L76.0847351,-0.690681892 L72.5248474,6.51694947 L72.5248474,81.2745911 L65.405072,81.2745911 L65.405072,6.51694947 L61.8451843,-0.690681892 L65.405072,-14.8423767 Z\" transform=\"translate(68.964960, 67.035040) rotate(45.000000) translate(-68.964960, -67.035040) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/tip": {
            "title": "$:/core/images/tip",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-tip tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,128.241818 C99.346224,128.241818 128,99.5880417 128,64.2418177 C128,28.8955937 99.346224,0.241817675 64,0.241817675 C28.653776,0.241817675 0,28.8955937 0,64.2418177 C0,99.5880417 28.653776,128.241818 64,128.241818 Z M75.9358659,91.4531941 C75.3115438,95.581915 70.2059206,98.8016748 64,98.8016748 C57.7940794,98.8016748 52.6884562,95.581915 52.0641341,91.4531941 C54.3299053,94.0502127 58.8248941,95.8192805 64,95.8192805 C69.1751059,95.8192805 73.6700947,94.0502127 75.9358659,91.4531941 L75.9358659,91.4531941 Z M75.9358659,95.9453413 C75.3115438,100.074062 70.2059206,103.293822 64,103.293822 C57.7940794,103.293822 52.6884562,100.074062 52.0641341,95.9453413 C54.3299053,98.5423599 58.8248941,100.311428 64,100.311428 C69.1751059,100.311428 73.6700947,98.5423599 75.9358659,95.9453413 L75.9358659,95.9453413 Z M75.9358659,100.40119 C75.3115438,104.529911 70.2059206,107.74967 64,107.74967 C57.7940794,107.74967 52.6884562,104.529911 52.0641341,100.40119 C54.3299053,102.998208 58.8248941,104.767276 64,104.767276 C69.1751059,104.767276 73.6700947,102.998208 75.9358659,100.40119 L75.9358659,100.40119 Z M75.9358659,104.893337 C75.3115438,109.022058 70.2059206,112.241818 64,112.241818 C57.7940794,112.241818 52.6884562,109.022058 52.0641341,104.893337 C54.3299053,107.490356 58.8248941,109.259423 64,109.259423 C69.1751059,109.259423 73.6700947,107.490356 75.9358659,104.893337 L75.9358659,104.893337 Z M64.3010456,24.2418177 C75.9193117,24.2418188 88.0000013,32.0619847 88,48.4419659 C87.9999987,64.8219472 75.9193018,71.7540963 75.9193021,83.5755932 C75.9193022,89.4486648 70.0521957,92.8368862 63.9999994,92.8368862 C57.947803,92.8368862 51.9731007,89.8295115 51.9731007,83.5755932 C51.9731007,71.1469799 39.9999998,65.4700602 40,48.4419647 C40.0000002,31.4138691 52.6827796,24.2418166 64.3010456,24.2418177 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/twitter": {
            "title": "$:/core/images/twitter",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-twitter tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M41.6263422,115.803477 C27.0279663,115.803477 13.4398394,111.540813 1.99987456,104.234833 C4.02221627,104.472643 6.08004574,104.594302 8.16644978,104.594302 C20.277456,104.594302 31.4238403,100.47763 40.270894,93.5715185 C28.9590538,93.3635501 19.4123842,85.9189246 16.1230832,75.6885328 C17.7011365,75.9892376 19.320669,76.1503787 20.9862896,76.1503787 C23.344152,76.1503787 25.6278127,75.8359011 27.7971751,75.247346 C15.9709927,72.8821073 7.06079851,62.4745062 7.06079851,49.9982394 C7.06079851,49.8898938 7.06079851,49.7820074 7.06264203,49.67458 C10.5482779,51.6032228 14.5339687,52.7615103 18.7717609,52.8951059 C11.8355159,48.277565 7.2714207,40.3958845 7.2714207,31.4624258 C7.2714207,26.7434257 8.54621495,22.3200804 10.7713439,18.5169676 C23.5211299,34.0957738 42.568842,44.3472839 64.0532269,45.4210985 C63.6126256,43.5365285 63.3835682,41.5711584 63.3835682,39.5529928 C63.3835682,25.3326379 74.95811,13.8034766 89.2347917,13.8034766 C96.6697089,13.8034766 103.387958,16.930807 108.103682,21.9353619 C113.991886,20.780288 119.52429,18.6372496 124.518847,15.6866694 C122.588682,21.6993889 118.490075,26.7457211 113.152623,29.9327334 C118.381769,29.3102055 123.363882,27.926045 127.999875,25.8780385 C124.534056,31.0418981 120.151087,35.5772616 115.100763,39.2077561 C115.150538,40.3118708 115.175426,41.4224128 115.175426,42.538923 C115.175426,76.5663154 89.1744164,115.803477 41.6263422,115.803477\"></path>\n    </g>\n</svg>\n"
        },
        "$:/core/images/underline": {
            "title": "$:/core/images/underline",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-underline tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M7,117.421488 L121.247934,117.421488 L121.247934,128 L7,128 L7,117.421488 Z M104.871212,98.8958333 L104.871212,0 L88.6117424,0 L88.6117424,55.8560606 C88.6117424,60.3194668 88.0060035,64.432115 86.7945076,68.1941288 C85.5830116,71.9561425 83.7657949,75.239885 81.342803,78.0454545 C78.9198111,80.8510241 75.8911167,83.0189317 72.2566288,84.5492424 C68.6221409,86.0795531 64.3182067,86.844697 59.344697,86.844697 C53.0959284,86.844697 48.1862552,85.0593613 44.6155303,81.4886364 C41.0448054,77.9179114 39.2594697,73.0720003 39.2594697,66.9507576 L39.2594697,0 L23,0 L23,65.0378788 C23,70.3939662 23.5419769,75.2717583 24.625947,79.6714015 C25.709917,84.0710447 27.5908957,87.864883 30.2689394,91.0530303 C32.9469831,94.2411776 36.4538925,96.6960141 40.7897727,98.4176136 C45.125653,100.139213 50.545422,101 57.0492424,101 C64.3182182,101 70.630655,99.5653553 75.9867424,96.6960227 C81.3428298,93.8266902 85.742407,89.33147 89.1856061,83.2102273 L89.5681818,83.2102273 L89.5681818,98.8958333 L104.871212,98.8958333 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unfold-all-button": {
            "title": "$:/core/images/unfold-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unfold-all tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"0\" y=\"64\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M85.598226,8.34884273 C84.1490432,6.89863875 82.1463102,6 79.9340286,6 L47.9482224,6 C43.5292967,6 39.9411255,9.581722 39.9411255,14 C39.9411255,18.4092877 43.5260249,22 47.9482224,22 L71.9411255,22 L71.9411255,45.9929031 C71.9411255,50.4118288 75.5228475,54 79.9411255,54 C84.3504132,54 87.9411255,50.4151006 87.9411255,45.9929031 L87.9411255,14.0070969 C87.9411255,11.7964515 87.0447363,9.79371715 85.5956548,8.34412458 Z\" transform=\"translate(63.941125, 30.000000) scale(1, -1) rotate(-45.000000) translate(-63.941125, -30.000000) \"></path>\n        <path d=\"M85.6571005,72.2899682 C84.2079177,70.8397642 82.2051847,69.9411255 79.9929031,69.9411255 L48.0070969,69.9411255 C43.5881712,69.9411255 40,73.5228475 40,77.9411255 C40,82.3504132 43.5848994,85.9411255 48.0070969,85.9411255 L72,85.9411255 L72,109.934029 C72,114.352954 75.581722,117.941125 80,117.941125 C84.4092877,117.941125 88,114.356226 88,109.934029 L88,77.9482224 C88,75.737577 87.1036108,73.7348426 85.6545293,72.2852501 Z\" transform=\"translate(64.000000, 93.941125) scale(1, -1) rotate(-45.000000) translate(-64.000000, -93.941125) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unfold-button": {
            "title": "$:/core/images/unfold-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unfold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M85.598226,11.3488427 C84.1490432,9.89863875 82.1463102,9 79.9340286,9 L47.9482224,9 C43.5292967,9 39.9411255,12.581722 39.9411255,17 C39.9411255,21.4092877 43.5260249,25 47.9482224,25 L71.9411255,25 L71.9411255,48.9929031 C71.9411255,53.4118288 75.5228475,57 79.9411255,57 C84.3504132,57 87.9411255,53.4151006 87.9411255,48.9929031 L87.9411255,17.0070969 C87.9411255,14.7964515 87.0447363,12.7937171 85.5956548,11.3441246 Z\" transform=\"translate(63.941125, 33.000000) scale(1, -1) rotate(-45.000000) translate(-63.941125, -33.000000) \"></path>\n        <path d=\"M85.6571005,53.4077172 C84.2079177,51.9575133 82.2051847,51.0588745 79.9929031,51.0588745 L48.0070969,51.0588745 C43.5881712,51.0588745 40,54.6405965 40,59.0588745 C40,63.4681622 43.5848994,67.0588745 48.0070969,67.0588745 L72,67.0588745 L72,91.0517776 C72,95.4707033 75.581722,99.0588745 80,99.0588745 C84.4092877,99.0588745 88,95.4739751 88,91.0517776 L88,59.0659714 C88,56.855326 87.1036108,54.8525917 85.6545293,53.4029991 Z\" transform=\"translate(64.000000, 75.058875) scale(1, -1) rotate(-45.000000) translate(-64.000000, -75.058875) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unlocked-padlock": {
            "title": "$:/core/images/unlocked-padlock",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unlocked-padlock tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M48.6266053,64 L105,64 L105,96.0097716 C105,113.673909 90.6736461,128 73.001193,128 L55.998807,128 C38.3179793,128 24,113.677487 24,96.0097716 L24,64 L30.136303,64 C19.6806213,51.3490406 2.77158986,28.2115132 25.8366966,8.85759246 C50.4723026,-11.8141335 71.6711028,13.2108337 81.613302,25.0594855 C91.5555012,36.9081373 78.9368488,47.4964439 69.1559674,34.9513593 C59.375086,22.4062748 47.9893192,10.8049522 35.9485154,20.9083862 C23.9077117,31.0118202 34.192312,43.2685325 44.7624679,55.8655518 C47.229397,58.805523 48.403443,61.5979188 48.6266053,64 Z M67.7315279,92.3641717 C70.8232551,91.0923621 73,88.0503841 73,84.5 C73,79.8055796 69.1944204,76 64.5,76 C59.8055796,76 56,79.8055796 56,84.5 C56,87.947435 58.0523387,90.9155206 61.0018621,92.2491029 L55.9067479,115.020857 L72.8008958,115.020857 L67.7315279,92.3641717 L67.7315279,92.3641717 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/up-arrow": {
            "created": "20150316000544368",
            "modified": "20150316000831867",
            "tags": "$:/tags/Image",
            "title": "$:/core/images/up-arrow",
            "text": "<svg class=\"tc-image-up-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n<path transform=\"rotate(-135, 63.8945, 64.1752)\" d=\"m109.07576,109.35336c-1.43248,1.43361 -3.41136,2.32182 -5.59717,2.32182l-79.16816,0c-4.36519,0 -7.91592,-3.5444 -7.91592,-7.91666c0,-4.36337 3.54408,-7.91667 7.91592,-7.91667l71.25075,0l0,-71.25074c0,-4.3652 3.54442,-7.91592 7.91667,-7.91592c4.36336,0 7.91667,3.54408 7.91667,7.91592l0,79.16815c0,2.1825 -0.88602,4.16136 -2.3185,5.59467l-0.00027,-0.00056l0.00001,-0.00001z\" />\n</svg>\n \n"
        },
        "$:/core/images/video": {
            "title": "$:/core/images/video",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-video tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,12 C29.0909091,12 8.72727273,14.9166667 5.81818182,17.8333333 C2.90909091,20.75 1.93784382e-15,41.1666667 0,64.5 C1.93784382e-15,87.8333333 2.90909091,108.25 5.81818182,111.166667 C8.72727273,114.083333 29.0909091,117 64,117 C98.9090909,117 119.272727,114.083333 122.181818,111.166667 C125.090909,108.25 128,87.8333333 128,64.5 C128,41.1666667 125.090909,20.75 122.181818,17.8333333 C119.272727,14.9166667 98.9090909,12 64,12 Z M54.9161194,44.6182253 C51.102648,42.0759111 48.0112186,43.7391738 48.0112186,48.3159447 L48.0112186,79.6840553 C48.0112186,84.2685636 51.109784,85.9193316 54.9161194,83.3817747 L77.0838806,68.6032672 C80.897352,66.0609529 80.890216,61.9342897 77.0838806,59.3967328 L54.9161194,44.6182253 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/warning": {
            "title": "$:/core/images/warning",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-warning tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M57.0717968,11 C60.1509982,5.66666667 67.8490018,5.66666667 70.9282032,11 L126.353829,107 C129.433031,112.333333 125.584029,119 119.425626,119 L8.57437416,119 C2.41597129,119 -1.43303051,112.333333 1.64617093,107 L57.0717968,11 Z M64,37 C59.581722,37 56,40.5820489 56,44.9935776 L56,73.0064224 C56,77.4211534 59.5907123,81 64,81 C68.418278,81 72,77.4179511 72,73.0064224 L72,44.9935776 C72,40.5788466 68.4092877,37 64,37 Z M64,104 C68.418278,104 72,100.418278 72,96 C72,91.581722 68.418278,88 64,88 C59.581722,88 56,91.581722 56,96 C56,100.418278 59.581722,104 64,104 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/language/Buttons/AdvancedSearch/Caption": {
            "title": "$:/language/Buttons/AdvancedSearch/Caption",
            "text": "advanced search"
        },
        "$:/language/Buttons/AdvancedSearch/Hint": {
            "title": "$:/language/Buttons/AdvancedSearch/Hint",
            "text": "Advanced search"
        },
        "$:/language/Buttons/Cancel/Caption": {
            "title": "$:/language/Buttons/Cancel/Caption",
            "text": "cancel"
        },
        "$:/language/Buttons/Cancel/Hint": {
            "title": "$:/language/Buttons/Cancel/Hint",
            "text": "Discard changes to this tiddler"
        },
        "$:/language/Buttons/Clone/Caption": {
            "title": "$:/language/Buttons/Clone/Caption",
            "text": "clone"
        },
        "$:/language/Buttons/Clone/Hint": {
            "title": "$:/language/Buttons/Clone/Hint",
            "text": "Clone this tiddler"
        },
        "$:/language/Buttons/Close/Caption": {
            "title": "$:/language/Buttons/Close/Caption",
            "text": "close"
        },
        "$:/language/Buttons/Close/Hint": {
            "title": "$:/language/Buttons/Close/Hint",
            "text": "Close this tiddler"
        },
        "$:/language/Buttons/CloseAll/Caption": {
            "title": "$:/language/Buttons/CloseAll/Caption",
            "text": "close all"
        },
        "$:/language/Buttons/CloseAll/Hint": {
            "title": "$:/language/Buttons/CloseAll/Hint",
            "text": "Close all tiddlers"
        },
        "$:/language/Buttons/CloseOthers/Caption": {
            "title": "$:/language/Buttons/CloseOthers/Caption",
            "text": "close others"
        },
        "$:/language/Buttons/CloseOthers/Hint": {
            "title": "$:/language/Buttons/CloseOthers/Hint",
            "text": "Close other tiddlers"
        },
        "$:/language/Buttons/ControlPanel/Caption": {
            "title": "$:/language/Buttons/ControlPanel/Caption",
            "text": "control panel"
        },
        "$:/language/Buttons/ControlPanel/Hint": {
            "title": "$:/language/Buttons/ControlPanel/Hint",
            "text": "Open control panel"
        },
        "$:/language/Buttons/Delete/Caption": {
            "title": "$:/language/Buttons/Delete/Caption",
            "text": "delete"
        },
        "$:/language/Buttons/Delete/Hint": {
            "title": "$:/language/Buttons/Delete/Hint",
            "text": "Delete this tiddler"
        },
        "$:/language/Buttons/Edit/Caption": {
            "title": "$:/language/Buttons/Edit/Caption",
            "text": "edit"
        },
        "$:/language/Buttons/Edit/Hint": {
            "title": "$:/language/Buttons/Edit/Hint",
            "text": "Edit this tiddler"
        },
        "$:/language/Buttons/Encryption/Caption": {
            "title": "$:/language/Buttons/Encryption/Caption",
            "text": "encryption"
        },
        "$:/language/Buttons/Encryption/Hint": {
            "title": "$:/language/Buttons/Encryption/Hint",
            "text": "Set or clear a password for saving this wiki"
        },
        "$:/language/Buttons/Encryption/ClearPassword/Caption": {
            "title": "$:/language/Buttons/Encryption/ClearPassword/Caption",
            "text": "clear password"
        },
        "$:/language/Buttons/Encryption/ClearPassword/Hint": {
            "title": "$:/language/Buttons/Encryption/ClearPassword/Hint",
            "text": "Clear the password and save this wiki without encryption"
        },
        "$:/language/Buttons/Encryption/SetPassword/Caption": {
            "title": "$:/language/Buttons/Encryption/SetPassword/Caption",
            "text": "set password"
        },
        "$:/language/Buttons/Encryption/SetPassword/Hint": {
            "title": "$:/language/Buttons/Encryption/SetPassword/Hint",
            "text": "Set a password for saving this wiki with encryption"
        },
        "$:/language/Buttons/ExportPage/Caption": {
            "title": "$:/language/Buttons/ExportPage/Caption",
            "text": "export all"
        },
        "$:/language/Buttons/ExportPage/Hint": {
            "title": "$:/language/Buttons/ExportPage/Hint",
            "text": "Export all tiddlers"
        },
        "$:/language/Buttons/ExportTiddler/Caption": {
            "title": "$:/language/Buttons/ExportTiddler/Caption",
            "text": "export tiddler"
        },
        "$:/language/Buttons/ExportTiddler/Hint": {
            "title": "$:/language/Buttons/ExportTiddler/Hint",
            "text": "Export tiddler"
        },
        "$:/language/Buttons/ExportTiddlers/Caption": {
            "title": "$:/language/Buttons/ExportTiddlers/Caption",
            "text": "export tiddlers"
        },
        "$:/language/Buttons/ExportTiddlers/Hint": {
            "title": "$:/language/Buttons/ExportTiddlers/Hint",
            "text": "Export tiddlers"
        },
        "$:/language/Buttons/Fold/Caption": {
            "title": "$:/language/Buttons/Fold/Caption",
            "text": "fold tiddler"
        },
        "$:/language/Buttons/Fold/Hint": {
            "title": "$:/language/Buttons/Fold/Hint",
            "text": "Fold the body of this tiddler"
        },
        "$:/language/Buttons/Fold/FoldBar/Caption": {
            "title": "$:/language/Buttons/Fold/FoldBar/Caption",
            "text": "fold-bar"
        },
        "$:/language/Buttons/Fold/FoldBar/Hint": {
            "title": "$:/language/Buttons/Fold/FoldBar/Hint",
            "text": "Optional bars to fold and unfold tiddlers"
        },
        "$:/language/Buttons/Unfold/Caption": {
            "title": "$:/language/Buttons/Unfold/Caption",
            "text": "unfold tiddler"
        },
        "$:/language/Buttons/Unfold/Hint": {
            "title": "$:/language/Buttons/Unfold/Hint",
            "text": "Unfold the body of this tiddler"
        },
        "$:/language/Buttons/FoldOthers/Caption": {
            "title": "$:/language/Buttons/FoldOthers/Caption",
            "text": "fold other tiddlers"
        },
        "$:/language/Buttons/FoldOthers/Hint": {
            "title": "$:/language/Buttons/FoldOthers/Hint",
            "text": "Fold the bodies of other opened tiddlers"
        },
        "$:/language/Buttons/FoldAll/Caption": {
            "title": "$:/language/Buttons/FoldAll/Caption",
            "text": "fold all tiddlers"
        },
        "$:/language/Buttons/FoldAll/Hint": {
            "title": "$:/language/Buttons/FoldAll/Hint",
            "text": "Fold the bodies of all opened tiddlers"
        },
        "$:/language/Buttons/UnfoldAll/Caption": {
            "title": "$:/language/Buttons/UnfoldAll/Caption",
            "text": "unfold all tiddlers"
        },
        "$:/language/Buttons/UnfoldAll/Hint": {
            "title": "$:/language/Buttons/UnfoldAll/Hint",
            "text": "Unfold the bodies of all opened tiddlers"
        },
        "$:/language/Buttons/FullScreen/Caption": {
            "title": "$:/language/Buttons/FullScreen/Caption",
            "text": "full-screen"
        },
        "$:/language/Buttons/FullScreen/Hint": {
            "title": "$:/language/Buttons/FullScreen/Hint",
            "text": "Enter or leave full-screen mode"
        },
        "$:/language/Buttons/Help/Caption": {
            "title": "$:/language/Buttons/Help/Caption",
            "text": "help"
        },
        "$:/language/Buttons/Help/Hint": {
            "title": "$:/language/Buttons/Help/Hint",
            "text": "Show help panel"
        },
        "$:/language/Buttons/Import/Caption": {
            "title": "$:/language/Buttons/Import/Caption",
            "text": "import"
        },
        "$:/language/Buttons/Import/Hint": {
            "title": "$:/language/Buttons/Import/Hint",
            "text": "Import many types of file including text, image, TiddlyWiki or JSON"
        },
        "$:/language/Buttons/Info/Caption": {
            "title": "$:/language/Buttons/Info/Caption",
            "text": "info"
        },
        "$:/language/Buttons/Info/Hint": {
            "title": "$:/language/Buttons/Info/Hint",
            "text": "Show information for this tiddler"
        },
        "$:/language/Buttons/Home/Caption": {
            "title": "$:/language/Buttons/Home/Caption",
            "text": "home"
        },
        "$:/language/Buttons/Home/Hint": {
            "title": "$:/language/Buttons/Home/Hint",
            "text": "Open the default tiddlers"
        },
        "$:/language/Buttons/Language/Caption": {
            "title": "$:/language/Buttons/Language/Caption",
            "text": "language"
        },
        "$:/language/Buttons/Language/Hint": {
            "title": "$:/language/Buttons/Language/Hint",
            "text": "Choose the user interface language"
        },
        "$:/language/Buttons/More/Caption": {
            "title": "$:/language/Buttons/More/Caption",
            "text": "more"
        },
        "$:/language/Buttons/More/Hint": {
            "title": "$:/language/Buttons/More/Hint",
            "text": "More actions"
        },
        "$:/language/Buttons/NewHere/Caption": {
            "title": "$:/language/Buttons/NewHere/Caption",
            "text": "new here"
        },
        "$:/language/Buttons/NewHere/Hint": {
            "title": "$:/language/Buttons/NewHere/Hint",
            "text": "Create a new tiddler tagged with this one"
        },
        "$:/language/Buttons/NewJournal/Caption": {
            "title": "$:/language/Buttons/NewJournal/Caption",
            "text": "new journal"
        },
        "$:/language/Buttons/NewJournal/Hint": {
            "title": "$:/language/Buttons/NewJournal/Hint",
            "text": "Create a new journal tiddler"
        },
        "$:/language/Buttons/NewJournalHere/Caption": {
            "title": "$:/language/Buttons/NewJournalHere/Caption",
            "text": "new journal here"
        },
        "$:/language/Buttons/NewJournalHere/Hint": {
            "title": "$:/language/Buttons/NewJournalHere/Hint",
            "text": "Create a new journal tiddler tagged with this one"
        },
        "$:/language/Buttons/NewImage/Caption": {
            "title": "$:/language/Buttons/NewImage/Caption",
            "text": "new image"
        },
        "$:/language/Buttons/NewImage/Hint": {
            "title": "$:/language/Buttons/NewImage/Hint",
            "text": "Create a new image tiddler"
        },
        "$:/language/Buttons/NewMarkdown/Caption": {
            "title": "$:/language/Buttons/NewMarkdown/Caption",
            "text": "new Markdown tiddler"
        },
        "$:/language/Buttons/NewMarkdown/Hint": {
            "title": "$:/language/Buttons/NewMarkdown/Hint",
            "text": "Create a new Markdown tiddler"
        },
        "$:/language/Buttons/NewTiddler/Caption": {
            "title": "$:/language/Buttons/NewTiddler/Caption",
            "text": "new tiddler"
        },
        "$:/language/Buttons/NewTiddler/Hint": {
            "title": "$:/language/Buttons/NewTiddler/Hint",
            "text": "Create a new tiddler"
        },
        "$:/language/Buttons/OpenWindow/Caption": {
            "title": "$:/language/Buttons/OpenWindow/Caption",
            "text": "open in new window"
        },
        "$:/language/Buttons/OpenWindow/Hint": {
            "title": "$:/language/Buttons/OpenWindow/Hint",
            "text": "Open tiddler in new window"
        },
        "$:/language/Buttons/Palette/Caption": {
            "title": "$:/language/Buttons/Palette/Caption",
            "text": "palette"
        },
        "$:/language/Buttons/Palette/Hint": {
            "title": "$:/language/Buttons/Palette/Hint",
            "text": "Choose the colour palette"
        },
        "$:/language/Buttons/Permalink/Caption": {
            "title": "$:/language/Buttons/Permalink/Caption",
            "text": "permalink"
        },
        "$:/language/Buttons/Permalink/Hint": {
            "title": "$:/language/Buttons/Permalink/Hint",
            "text": "Set browser address bar to a direct link to this tiddler"
        },
        "$:/language/Buttons/Permaview/Caption": {
            "title": "$:/language/Buttons/Permaview/Caption",
            "text": "permaview"
        },
        "$:/language/Buttons/Permaview/Hint": {
            "title": "$:/language/Buttons/Permaview/Hint",
            "text": "Set browser address bar to a direct link to all the tiddlers in this story"
        },
        "$:/language/Buttons/Refresh/Caption": {
            "title": "$:/language/Buttons/Refresh/Caption",
            "text": "refresh"
        },
        "$:/language/Buttons/Refresh/Hint": {
            "title": "$:/language/Buttons/Refresh/Hint",
            "text": "Perform a full refresh of the wiki"
        },
        "$:/language/Buttons/Save/Caption": {
            "title": "$:/language/Buttons/Save/Caption",
            "text": "ok"
        },
        "$:/language/Buttons/Save/Hint": {
            "title": "$:/language/Buttons/Save/Hint",
            "text": "Confirm changes to this tiddler"
        },
        "$:/language/Buttons/SaveWiki/Caption": {
            "title": "$:/language/Buttons/SaveWiki/Caption",
            "text": "save changes"
        },
        "$:/language/Buttons/SaveWiki/Hint": {
            "title": "$:/language/Buttons/SaveWiki/Hint",
            "text": "Save changes"
        },
        "$:/language/Buttons/StoryView/Caption": {
            "title": "$:/language/Buttons/StoryView/Caption",
            "text": "storyview"
        },
        "$:/language/Buttons/StoryView/Hint": {
            "title": "$:/language/Buttons/StoryView/Hint",
            "text": "Choose the story visualisation"
        },
        "$:/language/Buttons/HideSideBar/Caption": {
            "title": "$:/language/Buttons/HideSideBar/Caption",
            "text": "hide sidebar"
        },
        "$:/language/Buttons/HideSideBar/Hint": {
            "title": "$:/language/Buttons/HideSideBar/Hint",
            "text": "Hide sidebar"
        },
        "$:/language/Buttons/ShowSideBar/Caption": {
            "title": "$:/language/Buttons/ShowSideBar/Caption",
            "text": "show sidebar"
        },
        "$:/language/Buttons/ShowSideBar/Hint": {
            "title": "$:/language/Buttons/ShowSideBar/Hint",
            "text": "Show sidebar"
        },
        "$:/language/Buttons/TagManager/Caption": {
            "title": "$:/language/Buttons/TagManager/Caption",
            "text": "tag manager"
        },
        "$:/language/Buttons/TagManager/Hint": {
            "title": "$:/language/Buttons/TagManager/Hint",
            "text": "Open tag manager"
        },
        "$:/language/Buttons/Theme/Caption": {
            "title": "$:/language/Buttons/Theme/Caption",
            "text": "theme"
        },
        "$:/language/Buttons/Theme/Hint": {
            "title": "$:/language/Buttons/Theme/Hint",
            "text": "Choose the display theme"
        },
        "$:/language/Buttons/Bold/Caption": {
            "title": "$:/language/Buttons/Bold/Caption",
            "text": "bold"
        },
        "$:/language/Buttons/Bold/Hint": {
            "title": "$:/language/Buttons/Bold/Hint",
            "text": "Apply bold formatting to selection"
        },
        "$:/language/Buttons/Clear/Caption": {
            "title": "$:/language/Buttons/Clear/Caption",
            "text": "clear"
        },
        "$:/language/Buttons/Clear/Hint": {
            "title": "$:/language/Buttons/Clear/Hint",
            "text": "Clear image to solid colour"
        },
        "$:/language/Buttons/EditorHeight/Caption": {
            "title": "$:/language/Buttons/EditorHeight/Caption",
            "text": "editor height"
        },
        "$:/language/Buttons/EditorHeight/Caption/Auto": {
            "title": "$:/language/Buttons/EditorHeight/Caption/Auto",
            "text": "Automatically adjust height to fit content"
        },
        "$:/language/Buttons/EditorHeight/Caption/Fixed": {
            "title": "$:/language/Buttons/EditorHeight/Caption/Fixed",
            "text": "Fixed height:"
        },
        "$:/language/Buttons/EditorHeight/Hint": {
            "title": "$:/language/Buttons/EditorHeight/Hint",
            "text": "Choose the height of the text editor"
        },
        "$:/language/Buttons/Excise/Caption": {
            "title": "$:/language/Buttons/Excise/Caption",
            "text": "excise"
        },
        "$:/language/Buttons/Excise/Caption/Excise": {
            "title": "$:/language/Buttons/Excise/Caption/Excise",
            "text": "Perform excision"
        },
        "$:/language/Buttons/Excise/Caption/MacroName": {
            "title": "$:/language/Buttons/Excise/Caption/MacroName",
            "text": "Macro name:"
        },
        "$:/language/Buttons/Excise/Caption/NewTitle": {
            "title": "$:/language/Buttons/Excise/Caption/NewTitle",
            "text": "Title of new tiddler:"
        },
        "$:/language/Buttons/Excise/Caption/Replace": {
            "title": "$:/language/Buttons/Excise/Caption/Replace",
            "text": "Replace excised text with:"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Macro": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Macro",
            "text": "macro"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Link": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Link",
            "text": "link"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Transclusion": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Transclusion",
            "text": "transclusion"
        },
        "$:/language/Buttons/Excise/Caption/Tag": {
            "title": "$:/language/Buttons/Excise/Caption/Tag",
            "text": "Tag new tiddler with the title of this tiddler"
        },
        "$:/language/Buttons/Excise/Caption/TiddlerExists": {
            "title": "$:/language/Buttons/Excise/Caption/TiddlerExists",
            "text": "Warning: tiddler already exists"
        },
        "$:/language/Buttons/Excise/Hint": {
            "title": "$:/language/Buttons/Excise/Hint",
            "text": "Excise the selected text into a new tiddler"
        },
        "$:/language/Buttons/Heading1/Caption": {
            "title": "$:/language/Buttons/Heading1/Caption",
            "text": "heading 1"
        },
        "$:/language/Buttons/Heading1/Hint": {
            "title": "$:/language/Buttons/Heading1/Hint",
            "text": "Apply heading level 1 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading2/Caption": {
            "title": "$:/language/Buttons/Heading2/Caption",
            "text": "heading 2"
        },
        "$:/language/Buttons/Heading2/Hint": {
            "title": "$:/language/Buttons/Heading2/Hint",
            "text": "Apply heading level 2 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading3/Caption": {
            "title": "$:/language/Buttons/Heading3/Caption",
            "text": "heading 3"
        },
        "$:/language/Buttons/Heading3/Hint": {
            "title": "$:/language/Buttons/Heading3/Hint",
            "text": "Apply heading level 3 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading4/Caption": {
            "title": "$:/language/Buttons/Heading4/Caption",
            "text": "heading 4"
        },
        "$:/language/Buttons/Heading4/Hint": {
            "title": "$:/language/Buttons/Heading4/Hint",
            "text": "Apply heading level 4 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading5/Caption": {
            "title": "$:/language/Buttons/Heading5/Caption",
            "text": "heading 5"
        },
        "$:/language/Buttons/Heading5/Hint": {
            "title": "$:/language/Buttons/Heading5/Hint",
            "text": "Apply heading level 5 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading6/Caption": {
            "title": "$:/language/Buttons/Heading6/Caption",
            "text": "heading 6"
        },
        "$:/language/Buttons/Heading6/Hint": {
            "title": "$:/language/Buttons/Heading6/Hint",
            "text": "Apply heading level 6 formatting to lines containing selection"
        },
        "$:/language/Buttons/Italic/Caption": {
            "title": "$:/language/Buttons/Italic/Caption",
            "text": "italic"
        },
        "$:/language/Buttons/Italic/Hint": {
            "title": "$:/language/Buttons/Italic/Hint",
            "text": "Apply italic formatting to selection"
        },
        "$:/language/Buttons/LineWidth/Caption": {
            "title": "$:/language/Buttons/LineWidth/Caption",
            "text": "line width"
        },
        "$:/language/Buttons/LineWidth/Hint": {
            "title": "$:/language/Buttons/LineWidth/Hint",
            "text": "Set line width for painting"
        },
        "$:/language/Buttons/Link/Caption": {
            "title": "$:/language/Buttons/Link/Caption",
            "text": "link"
        },
        "$:/language/Buttons/Link/Hint": {
            "title": "$:/language/Buttons/Link/Hint",
            "text": "Create wikitext link"
        },
        "$:/language/Buttons/ListBullet/Caption": {
            "title": "$:/language/Buttons/ListBullet/Caption",
            "text": "bulleted list"
        },
        "$:/language/Buttons/ListBullet/Hint": {
            "title": "$:/language/Buttons/ListBullet/Hint",
            "text": "Apply bulleted list formatting to lines containing selection"
        },
        "$:/language/Buttons/ListNumber/Caption": {
            "title": "$:/language/Buttons/ListNumber/Caption",
            "text": "numbered list"
        },
        "$:/language/Buttons/ListNumber/Hint": {
            "title": "$:/language/Buttons/ListNumber/Hint",
            "text": "Apply numbered list formatting to lines containing selection"
        },
        "$:/language/Buttons/MonoBlock/Caption": {
            "title": "$:/language/Buttons/MonoBlock/Caption",
            "text": "monospaced block"
        },
        "$:/language/Buttons/MonoBlock/Hint": {
            "title": "$:/language/Buttons/MonoBlock/Hint",
            "text": "Apply monospaced block formatting to lines containing selection"
        },
        "$:/language/Buttons/MonoLine/Caption": {
            "title": "$:/language/Buttons/MonoLine/Caption",
            "text": "monospaced"
        },
        "$:/language/Buttons/MonoLine/Hint": {
            "title": "$:/language/Buttons/MonoLine/Hint",
            "text": "Apply monospaced character formatting to selection"
        },
        "$:/language/Buttons/Opacity/Caption": {
            "title": "$:/language/Buttons/Opacity/Caption",
            "text": "opacity"
        },
        "$:/language/Buttons/Opacity/Hint": {
            "title": "$:/language/Buttons/Opacity/Hint",
            "text": "Set painting opacity"
        },
        "$:/language/Buttons/Paint/Caption": {
            "title": "$:/language/Buttons/Paint/Caption",
            "text": "paint colour"
        },
        "$:/language/Buttons/Paint/Hint": {
            "title": "$:/language/Buttons/Paint/Hint",
            "text": "Set painting colour"
        },
        "$:/language/Buttons/Picture/Caption": {
            "title": "$:/language/Buttons/Picture/Caption",
            "text": "picture"
        },
        "$:/language/Buttons/Picture/Hint": {
            "title": "$:/language/Buttons/Picture/Hint",
            "text": "Insert picture"
        },
        "$:/language/Buttons/Preview/Caption": {
            "title": "$:/language/Buttons/Preview/Caption",
            "text": "preview"
        },
        "$:/language/Buttons/Preview/Hint": {
            "title": "$:/language/Buttons/Preview/Hint",
            "text": "Show preview pane"
        },
        "$:/language/Buttons/PreviewType/Caption": {
            "title": "$:/language/Buttons/PreviewType/Caption",
            "text": "preview type"
        },
        "$:/language/Buttons/PreviewType/Hint": {
            "title": "$:/language/Buttons/PreviewType/Hint",
            "text": "Choose preview type"
        },
        "$:/language/Buttons/Quote/Caption": {
            "title": "$:/language/Buttons/Quote/Caption",
            "text": "quote"
        },
        "$:/language/Buttons/Quote/Hint": {
            "title": "$:/language/Buttons/Quote/Hint",
            "text": "Apply quoted text formatting to lines containing selection"
        },
        "$:/language/Buttons/Size/Caption": {
            "title": "$:/language/Buttons/Size/Caption",
            "text": "image size"
        },
        "$:/language/Buttons/Size/Caption/Height": {
            "title": "$:/language/Buttons/Size/Caption/Height",
            "text": "Height:"
        },
        "$:/language/Buttons/Size/Caption/Resize": {
            "title": "$:/language/Buttons/Size/Caption/Resize",
            "text": "Resize image"
        },
        "$:/language/Buttons/Size/Caption/Width": {
            "title": "$:/language/Buttons/Size/Caption/Width",
            "text": "Width:"
        },
        "$:/language/Buttons/Size/Hint": {
            "title": "$:/language/Buttons/Size/Hint",
            "text": "Set image size"
        },
        "$:/language/Buttons/Stamp/Caption": {
            "title": "$:/language/Buttons/Stamp/Caption",
            "text": "stamp"
        },
        "$:/language/Buttons/Stamp/Caption/New": {
            "title": "$:/language/Buttons/Stamp/Caption/New",
            "text": "Add your own"
        },
        "$:/language/Buttons/Stamp/Hint": {
            "title": "$:/language/Buttons/Stamp/Hint",
            "text": "Insert a preconfigured snippet of text"
        },
        "$:/language/Buttons/Stamp/New/Title": {
            "title": "$:/language/Buttons/Stamp/New/Title",
            "text": "Name as shown in menu"
        },
        "$:/language/Buttons/Stamp/New/Text": {
            "title": "$:/language/Buttons/Stamp/New/Text",
            "text": "Text of snippet. (Remember to add a descriptive title in the caption field)."
        },
        "$:/language/Buttons/Strikethrough/Caption": {
            "title": "$:/language/Buttons/Strikethrough/Caption",
            "text": "strikethrough"
        },
        "$:/language/Buttons/Strikethrough/Hint": {
            "title": "$:/language/Buttons/Strikethrough/Hint",
            "text": "Apply strikethrough formatting to selection"
        },
        "$:/language/Buttons/Subscript/Caption": {
            "title": "$:/language/Buttons/Subscript/Caption",
            "text": "subscript"
        },
        "$:/language/Buttons/Subscript/Hint": {
            "title": "$:/language/Buttons/Subscript/Hint",
            "text": "Apply subscript formatting to selection"
        },
        "$:/language/Buttons/Superscript/Caption": {
            "title": "$:/language/Buttons/Superscript/Caption",
            "text": "superscript"
        },
        "$:/language/Buttons/Superscript/Hint": {
            "title": "$:/language/Buttons/Superscript/Hint",
            "text": "Apply superscript formatting to selection"
        },
        "$:/language/Buttons/Underline/Caption": {
            "title": "$:/language/Buttons/Underline/Caption",
            "text": "underline"
        },
        "$:/language/Buttons/Underline/Hint": {
            "title": "$:/language/Buttons/Underline/Hint",
            "text": "Apply underline formatting to selection"
        },
        "$:/language/ControlPanel/Advanced/Caption": {
            "title": "$:/language/ControlPanel/Advanced/Caption",
            "text": "Advanced"
        },
        "$:/language/ControlPanel/Advanced/Hint": {
            "title": "$:/language/ControlPanel/Advanced/Hint",
            "text": "Internal information about this TiddlyWiki"
        },
        "$:/language/ControlPanel/Appearance/Caption": {
            "title": "$:/language/ControlPanel/Appearance/Caption",
            "text": "Appearance"
        },
        "$:/language/ControlPanel/Appearance/Hint": {
            "title": "$:/language/ControlPanel/Appearance/Hint",
            "text": "Ways to customise the appearance of your TiddlyWiki."
        },
        "$:/language/ControlPanel/Basics/AnimDuration/Prompt": {
            "title": "$:/language/ControlPanel/Basics/AnimDuration/Prompt",
            "text": "Animation duration:"
        },
        "$:/language/ControlPanel/Basics/Caption": {
            "title": "$:/language/ControlPanel/Basics/Caption",
            "text": "Basics"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/BottomHint": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/BottomHint",
            "text": "Use &#91;&#91;double square brackets&#93;&#93; for titles with spaces. Or you can choose to <$button set=\"$:/DefaultTiddlers\" setTo=\"[list[$:/StoryList]]\">retain story ordering</$button>"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/Prompt",
            "text": "Default tiddlers:"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/TopHint": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/TopHint",
            "text": "Choose which tiddlers are displayed at startup:"
        },
        "$:/language/ControlPanel/Basics/Language/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Language/Prompt",
            "text": "Hello! Current language:"
        },
        "$:/language/ControlPanel/Basics/NewJournal/Title/Prompt": {
            "title": "$:/language/ControlPanel/Basics/NewJournal/Title/Prompt",
            "text": "Title of new journal tiddlers"
        },
        "$:/language/ControlPanel/Basics/NewJournal/Tags/Prompt": {
            "title": "$:/language/ControlPanel/Basics/NewJournal/Tags/Prompt",
            "text": "Tags for new journal tiddlers"
        },
        "$:/language/ControlPanel/Basics/OverriddenShadowTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/OverriddenShadowTiddlers/Prompt",
            "text": "Number of overridden shadow tiddlers:"
        },
        "$:/language/ControlPanel/Basics/ShadowTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/ShadowTiddlers/Prompt",
            "text": "Number of shadow tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Subtitle/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Subtitle/Prompt",
            "text": "Subtitle:"
        },
        "$:/language/ControlPanel/Basics/SystemTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/SystemTiddlers/Prompt",
            "text": "Number of system tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Tags/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Tags/Prompt",
            "text": "Number of tags:"
        },
        "$:/language/ControlPanel/Basics/Tiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Tiddlers/Prompt",
            "text": "Number of tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Title/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Title/Prompt",
            "text": "Title of this ~TiddlyWiki:"
        },
        "$:/language/ControlPanel/Basics/Username/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Username/Prompt",
            "text": "Username for signing edits:"
        },
        "$:/language/ControlPanel/Basics/Version/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Version/Prompt",
            "text": "~TiddlyWiki version:"
        },
        "$:/language/ControlPanel/EditorTypes/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Caption",
            "text": "Editor Types"
        },
        "$:/language/ControlPanel/EditorTypes/Editor/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Editor/Caption",
            "text": "Editor"
        },
        "$:/language/ControlPanel/EditorTypes/Hint": {
            "title": "$:/language/ControlPanel/EditorTypes/Hint",
            "text": "These tiddlers determine which editor is used to edit specific tiddler types."
        },
        "$:/language/ControlPanel/EditorTypes/Type/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Type/Caption",
            "text": "Type"
        },
        "$:/language/ControlPanel/Info/Caption": {
            "title": "$:/language/ControlPanel/Info/Caption",
            "text": "Info"
        },
        "$:/language/ControlPanel/Info/Hint": {
            "title": "$:/language/ControlPanel/Info/Hint",
            "text": "Information about this TiddlyWiki"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt",
            "text": "Type shortcut here"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Add/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Add/Caption",
            "text": "add shortcut"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Caption",
            "text": "Keyboard Shortcuts"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Hint": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Hint",
            "text": "Manage keyboard shortcut assignments"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/NoShortcuts/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/NoShortcuts/Caption",
            "text": "No keyboard shortcuts assigned"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Remove/Hint": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Remove/Hint",
            "text": "remove keyboard shortcut"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/All": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/All",
            "text": "All platforms"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Mac": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Mac",
            "text": "Macintosh platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonMac": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonMac",
            "text": "Non-Macintosh platforms only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Linux": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Linux",
            "text": "Linux platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonLinux": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonLinux",
            "text": "Non-Linux platforms only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Windows": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Windows",
            "text": "Windows platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonWindows": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonWindows",
            "text": "Non-Windows platforms only"
        },
        "$:/language/ControlPanel/LoadedModules/Caption": {
            "title": "$:/language/ControlPanel/LoadedModules/Caption",
            "text": "Loaded Modules"
        },
        "$:/language/ControlPanel/LoadedModules/Hint": {
            "title": "$:/language/ControlPanel/LoadedModules/Hint",
            "text": "These are the currently loaded tiddler modules linked to their source tiddlers. Any italicised modules lack a source tiddler, typically because they were setup during the boot process."
        },
        "$:/language/ControlPanel/Palette/Caption": {
            "title": "$:/language/ControlPanel/Palette/Caption",
            "text": "Palette"
        },
        "$:/language/ControlPanel/Palette/Editor/Clone/Caption": {
            "title": "$:/language/ControlPanel/Palette/Editor/Clone/Caption",
            "text": "clone"
        },
        "$:/language/ControlPanel/Palette/Editor/Clone/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Editor/Clone/Prompt",
            "text": "It is recommended that you clone this shadow palette before editing it"
        },
        "$:/language/ControlPanel/Palette/Editor/Prompt/Modified": {
            "title": "$:/language/ControlPanel/Palette/Editor/Prompt/Modified",
            "text": "This shadow palette has been modified"
        },
        "$:/language/ControlPanel/Palette/Editor/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Editor/Prompt",
            "text": "Editing"
        },
        "$:/language/ControlPanel/Palette/Editor/Reset/Caption": {
            "title": "$:/language/ControlPanel/Palette/Editor/Reset/Caption",
            "text": "reset"
        },
        "$:/language/ControlPanel/Palette/HideEditor/Caption": {
            "title": "$:/language/ControlPanel/Palette/HideEditor/Caption",
            "text": "hide editor"
        },
        "$:/language/ControlPanel/Palette/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Prompt",
            "text": "Current palette:"
        },
        "$:/language/ControlPanel/Palette/ShowEditor/Caption": {
            "title": "$:/language/ControlPanel/Palette/ShowEditor/Caption",
            "text": "show editor"
        },
        "$:/language/ControlPanel/Parsing/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Caption",
            "text": "Parsing"
        },
        "$:/language/ControlPanel/Parsing/Hint": {
            "title": "$:/language/ControlPanel/Parsing/Hint",
            "text": "Here you can globally disable individual wiki parser rules. Take care as disabling some parser rules can prevent ~TiddlyWiki functioning correctly (you can restore normal operation with [[safe mode|http://tiddlywiki.com/#SafeMode]] )"
        },
        "$:/language/ControlPanel/Parsing/Block/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Block/Caption",
            "text": "Block Parse Rules"
        },
        "$:/language/ControlPanel/Parsing/Inline/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Inline/Caption",
            "text": "Inline Parse Rules"
        },
        "$:/language/ControlPanel/Parsing/Pragma/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Pragma/Caption",
            "text": "Pragma Parse Rules"
        },
        "$:/language/ControlPanel/Plugins/Add/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Add/Caption",
            "text": "Get more plugins"
        },
        "$:/language/ControlPanel/Plugins/Add/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Add/Hint",
            "text": "Install plugins from the official library"
        },
        "$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint": {
            "title": "$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint",
            "text": "This plugin is already installed at version <$text text=<<installedVersion>>/>"
        },
        "$:/language/ControlPanel/Plugins/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Caption",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Disable/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Disable/Caption",
            "text": "disable"
        },
        "$:/language/ControlPanel/Plugins/Disable/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Disable/Hint",
            "text": "Disable this plugin when reloading page"
        },
        "$:/language/ControlPanel/Plugins/Disabled/Status": {
            "title": "$:/language/ControlPanel/Plugins/Disabled/Status",
            "text": "(disabled)"
        },
        "$:/language/ControlPanel/Plugins/Empty/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Empty/Hint",
            "text": "None"
        },
        "$:/language/ControlPanel/Plugins/Enable/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Enable/Caption",
            "text": "enable"
        },
        "$:/language/ControlPanel/Plugins/Enable/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Enable/Hint",
            "text": "Enable this plugin when reloading page"
        },
        "$:/language/ControlPanel/Plugins/Install/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Install/Caption",
            "text": "install"
        },
        "$:/language/ControlPanel/Plugins/Installed/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Installed/Hint",
            "text": "Currently installed plugins:"
        },
        "$:/language/ControlPanel/Plugins/Languages/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Languages/Caption",
            "text": "Languages"
        },
        "$:/language/ControlPanel/Plugins/Languages/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Languages/Hint",
            "text": "Language pack plugins"
        },
        "$:/language/ControlPanel/Plugins/NoInfoFound/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NoInfoFound/Hint",
            "text": "No ''\"<$text text=<<currentTab>>/>\"'' found"
        },
        "$:/language/ControlPanel/Plugins/NoInformation/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NoInformation/Hint",
            "text": "No information provided"
        },
        "$:/language/ControlPanel/Plugins/NotInstalled/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NotInstalled/Hint",
            "text": "This plugin is not currently installed"
        },
        "$:/language/ControlPanel/Plugins/OpenPluginLibrary": {
            "title": "$:/language/ControlPanel/Plugins/OpenPluginLibrary",
            "text": "open plugin library"
        },
        "$:/language/ControlPanel/Plugins/Plugins/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Plugins/Caption",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Plugins/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Plugins/Hint",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Reinstall/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Reinstall/Caption",
            "text": "reinstall"
        },
        "$:/language/ControlPanel/Plugins/Themes/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Themes/Caption",
            "text": "Themes"
        },
        "$:/language/ControlPanel/Plugins/Themes/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Themes/Hint",
            "text": "Theme plugins"
        },
        "$:/language/ControlPanel/Saving/Caption": {
            "title": "$:/language/ControlPanel/Saving/Caption",
            "text": "Saving"
        },
        "$:/language/ControlPanel/Saving/Heading": {
            "title": "$:/language/ControlPanel/Saving/Heading",
            "text": "Saving"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Advanced/Heading": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Advanced/Heading",
            "text": "Advanced Settings"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/BackupDir": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/BackupDir",
            "text": "Backup Directory"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Backups": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Backups",
            "text": "Backups"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Description": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Description",
            "text": "These settings are only used when saving to http://tiddlyspot.com or a compatible remote server"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Filename": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Filename",
            "text": "Upload Filename"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Heading": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Heading",
            "text": "~TiddlySpot"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Hint": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Hint",
            "text": "//The server URL defaults to `http://<wikiname>.tiddlyspot.com/store.cgi` and can be changed to use a custom server address, e.g. `http://example.com/store.php`.//"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Password": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Password",
            "text": "Password"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/ServerURL": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/ServerURL",
            "text": "Server URL"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/UploadDir": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/UploadDir",
            "text": "Upload Directory"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/UserName": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/UserName",
            "text": "Wiki Name"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Caption": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Caption",
            "text": "Autosave"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Disabled/Description": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Disabled/Description",
            "text": "Do not save changes automatically"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Enabled/Description": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Enabled/Description",
            "text": "Save changes automatically"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Hint": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Hint",
            "text": "Automatically save changes during editing"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Caption": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Caption",
            "text": "Camel Case Wiki Links"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Hint": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Hint",
            "text": "You can globally disable automatic linking of ~CamelCase phrases. Requires reload to take effect"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Description": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Description",
            "text": "Enable automatic ~CamelCase linking"
        },
        "$:/language/ControlPanel/Settings/Caption": {
            "title": "$:/language/ControlPanel/Settings/Caption",
            "text": "Settings"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Caption": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Caption",
            "text": "Editor Toolbar"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Hint": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Hint",
            "text": "Enable or disable the editor toolbar:"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Description": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Description",
            "text": "Show editor toolbar"
        },
        "$:/language/ControlPanel/Settings/Hint": {
            "title": "$:/language/ControlPanel/Settings/Hint",
            "text": "These settings let you customise the behaviour of TiddlyWiki."
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Caption": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Caption",
            "text": "Navigation Address Bar"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Hint": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Hint",
            "text": "Behaviour of the browser address bar when navigating to a tiddler:"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/No/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/No/Description",
            "text": "Do not update the address bar"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Permalink/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Permalink/Description",
            "text": "Include the target tiddler"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Permaview/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Permaview/Description",
            "text": "Include the target tiddler and the current story sequence"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Caption": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Caption",
            "text": "Navigation History"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Hint": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Hint",
            "text": "Update browser history when navigating to a tiddler:"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/No/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/No/Description",
            "text": "Do not update history"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Yes/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Yes/Description",
            "text": "Update history"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption",
            "text": "Performance Instrumentation"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Hint": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Hint",
            "text": "Displays performance statistics in the browser developer console. Requires reload to take effect"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Description": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Description",
            "text": "Enable performance instrumentation"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption",
            "text": "Toolbar Button Style"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Hint": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Hint",
            "text": "Choose the style for toolbar buttons:"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless",
            "text": "Borderless"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed",
            "text": "Boxed"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded",
            "text": "Rounded"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Caption": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Caption",
            "text": "Toolbar Buttons"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Hint": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Hint",
            "text": "Default toolbar button appearance:"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Icons/Description": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Icons/Description",
            "text": "Include icon"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Text/Description": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Text/Description",
            "text": "Include text"
        },
        "$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption": {
            "title": "$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption",
            "text": "Default Sidebar Tab"
        },
        "$:/language/ControlPanel/Settings/DefaultSidebarTab/Hint": {
            "title": "$:/language/ControlPanel/Settings/DefaultSidebarTab/Hint",
            "text": "Specify which sidebar tab is displayed by default"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/Caption": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/Caption",
            "text": "Tiddler Opening Behaviour"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/InsideRiver/Hint": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/InsideRiver/Hint",
            "text": "Navigation from //within// the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OutsideRiver/Hint": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OutsideRiver/Hint",
            "text": "Navigation from //outside// the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAbove": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAbove",
            "text": "Open above the current tiddler"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenBelow": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenBelow",
            "text": "Open below the current tiddler"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtTop": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtTop",
            "text": "Open at the top of the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtBottom": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtBottom",
            "text": "Open at the bottom of the story river"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Caption": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Caption",
            "text": "Tiddler Titles"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Hint": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Hint",
            "text": "Optionally display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/No/Description": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/No/Description",
            "text": "Do not display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Yes/Description": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Yes/Description",
            "text": "Display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Caption": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Caption",
            "text": "Wiki Links"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Hint": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Hint",
            "text": "Choose whether to link to tiddlers that do not exist yet"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Description": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Description",
            "text": "Enable links to missing tiddlers"
        },
        "$:/language/ControlPanel/StoryView/Caption": {
            "title": "$:/language/ControlPanel/StoryView/Caption",
            "text": "Story View"
        },
        "$:/language/ControlPanel/StoryView/Prompt": {
            "title": "$:/language/ControlPanel/StoryView/Prompt",
            "text": "Current view:"
        },
        "$:/language/ControlPanel/Theme/Caption": {
            "title": "$:/language/ControlPanel/Theme/Caption",
            "text": "Theme"
        },
        "$:/language/ControlPanel/Theme/Prompt": {
            "title": "$:/language/ControlPanel/Theme/Prompt",
            "text": "Current theme:"
        },
        "$:/language/ControlPanel/TiddlerFields/Caption": {
            "title": "$:/language/ControlPanel/TiddlerFields/Caption",
            "text": "Tiddler Fields"
        },
        "$:/language/ControlPanel/TiddlerFields/Hint": {
            "title": "$:/language/ControlPanel/TiddlerFields/Hint",
            "text": "This is the full set of TiddlerFields in use in this wiki (including system tiddlers but excluding shadow tiddlers)."
        },
        "$:/language/ControlPanel/Toolbars/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/Caption",
            "text": "Toolbars"
        },
        "$:/language/ControlPanel/Toolbars/EditToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/EditToolbar/Caption",
            "text": "Edit Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/EditToolbar/Hint",
            "text": "Choose which buttons are displayed for tiddlers in edit mode"
        },
        "$:/language/ControlPanel/Toolbars/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/Hint",
            "text": "Select which toolbar buttons are displayed"
        },
        "$:/language/ControlPanel/Toolbars/PageControls/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/PageControls/Caption",
            "text": "Page Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/PageControls/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/PageControls/Hint",
            "text": "Choose which buttons are displayed on the main page toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditorToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/EditorToolbar/Caption",
            "text": "Editor Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditorToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/EditorToolbar/Hint",
            "text": "Choose which buttons are displayed in the editor toolbar. Note that some buttons will only appear when editing tiddlers of a certain type"
        },
        "$:/language/ControlPanel/Toolbars/ViewToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/ViewToolbar/Caption",
            "text": "View Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/ViewToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/ViewToolbar/Hint",
            "text": "Choose which buttons are displayed for tiddlers in view mode"
        },
        "$:/language/ControlPanel/Tools/Download/Full/Caption": {
            "title": "$:/language/ControlPanel/Tools/Download/Full/Caption",
            "text": "Download full wiki"
        },
        "$:/language/Date/DaySuffix/1": {
            "title": "$:/language/Date/DaySuffix/1",
            "text": "st"
        },
        "$:/language/Date/DaySuffix/2": {
            "title": "$:/language/Date/DaySuffix/2",
            "text": "nd"
        },
        "$:/language/Date/DaySuffix/3": {
            "title": "$:/language/Date/DaySuffix/3",
            "text": "rd"
        },
        "$:/language/Date/DaySuffix/4": {
            "title": "$:/language/Date/DaySuffix/4",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/5": {
            "title": "$:/language/Date/DaySuffix/5",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/6": {
            "title": "$:/language/Date/DaySuffix/6",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/7": {
            "title": "$:/language/Date/DaySuffix/7",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/8": {
            "title": "$:/language/Date/DaySuffix/8",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/9": {
            "title": "$:/language/Date/DaySuffix/9",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/10": {
            "title": "$:/language/Date/DaySuffix/10",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/11": {
            "title": "$:/language/Date/DaySuffix/11",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/12": {
            "title": "$:/language/Date/DaySuffix/12",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/13": {
            "title": "$:/language/Date/DaySuffix/13",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/14": {
            "title": "$:/language/Date/DaySuffix/14",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/15": {
            "title": "$:/language/Date/DaySuffix/15",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/16": {
            "title": "$:/language/Date/DaySuffix/16",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/17": {
            "title": "$:/language/Date/DaySuffix/17",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/18": {
            "title": "$:/language/Date/DaySuffix/18",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/19": {
            "title": "$:/language/Date/DaySuffix/19",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/20": {
            "title": "$:/language/Date/DaySuffix/20",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/21": {
            "title": "$:/language/Date/DaySuffix/21",
            "text": "st"
        },
        "$:/language/Date/DaySuffix/22": {
            "title": "$:/language/Date/DaySuffix/22",
            "text": "nd"
        },
        "$:/language/Date/DaySuffix/23": {
            "title": "$:/language/Date/DaySuffix/23",
            "text": "rd"
        },
        "$:/language/Date/DaySuffix/24": {
            "title": "$:/language/Date/DaySuffix/24",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/25": {
            "title": "$:/language/Date/DaySuffix/25",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/26": {
            "title": "$:/language/Date/DaySuffix/26",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/27": {
            "title": "$:/language/Date/DaySuffix/27",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/28": {
            "title": "$:/language/Date/DaySuffix/28",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/29": {
            "title": "$:/language/Date/DaySuffix/29",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/30": {
            "title": "$:/language/Date/DaySuffix/30",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/31": {
            "title": "$:/language/Date/DaySuffix/31",
            "text": "st"
        },
        "$:/language/Date/Long/Day/0": {
            "title": "$:/language/Date/Long/Day/0",
            "text": "Sunday"
        },
        "$:/language/Date/Long/Day/1": {
            "title": "$:/language/Date/Long/Day/1",
            "text": "Monday"
        },
        "$:/language/Date/Long/Day/2": {
            "title": "$:/language/Date/Long/Day/2",
            "text": "Tuesday"
        },
        "$:/language/Date/Long/Day/3": {
            "title": "$:/language/Date/Long/Day/3",
            "text": "Wednesday"
        },
        "$:/language/Date/Long/Day/4": {
            "title": "$:/language/Date/Long/Day/4",
            "text": "Thursday"
        },
        "$:/language/Date/Long/Day/5": {
            "title": "$:/language/Date/Long/Day/5",
            "text": "Friday"
        },
        "$:/language/Date/Long/Day/6": {
            "title": "$:/language/Date/Long/Day/6",
            "text": "Saturday"
        },
        "$:/language/Date/Long/Month/1": {
            "title": "$:/language/Date/Long/Month/1",
            "text": "January"
        },
        "$:/language/Date/Long/Month/2": {
            "title": "$:/language/Date/Long/Month/2",
            "text": "February"
        },
        "$:/language/Date/Long/Month/3": {
            "title": "$:/language/Date/Long/Month/3",
            "text": "March"
        },
        "$:/language/Date/Long/Month/4": {
            "title": "$:/language/Date/Long/Month/4",
            "text": "April"
        },
        "$:/language/Date/Long/Month/5": {
            "title": "$:/language/Date/Long/Month/5",
            "text": "May"
        },
        "$:/language/Date/Long/Month/6": {
            "title": "$:/language/Date/Long/Month/6",
            "text": "June"
        },
        "$:/language/Date/Long/Month/7": {
            "title": "$:/language/Date/Long/Month/7",
            "text": "July"
        },
        "$:/language/Date/Long/Month/8": {
            "title": "$:/language/Date/Long/Month/8",
            "text": "August"
        },
        "$:/language/Date/Long/Month/9": {
            "title": "$:/language/Date/Long/Month/9",
            "text": "September"
        },
        "$:/language/Date/Long/Month/10": {
            "title": "$:/language/Date/Long/Month/10",
            "text": "October"
        },
        "$:/language/Date/Long/Month/11": {
            "title": "$:/language/Date/Long/Month/11",
            "text": "November"
        },
        "$:/language/Date/Long/Month/12": {
            "title": "$:/language/Date/Long/Month/12",
            "text": "December"
        },
        "$:/language/Date/Period/am": {
            "title": "$:/language/Date/Period/am",
            "text": "am"
        },
        "$:/language/Date/Period/pm": {
            "title": "$:/language/Date/Period/pm",
            "text": "pm"
        },
        "$:/language/Date/Short/Day/0": {
            "title": "$:/language/Date/Short/Day/0",
            "text": "Sun"
        },
        "$:/language/Date/Short/Day/1": {
            "title": "$:/language/Date/Short/Day/1",
            "text": "Mon"
        },
        "$:/language/Date/Short/Day/2": {
            "title": "$:/language/Date/Short/Day/2",
            "text": "Tue"
        },
        "$:/language/Date/Short/Day/3": {
            "title": "$:/language/Date/Short/Day/3",
            "text": "Wed"
        },
        "$:/language/Date/Short/Day/4": {
            "title": "$:/language/Date/Short/Day/4",
            "text": "Thu"
        },
        "$:/language/Date/Short/Day/5": {
            "title": "$:/language/Date/Short/Day/5",
            "text": "Fri"
        },
        "$:/language/Date/Short/Day/6": {
            "title": "$:/language/Date/Short/Day/6",
            "text": "Sat"
        },
        "$:/language/Date/Short/Month/1": {
            "title": "$:/language/Date/Short/Month/1",
            "text": "Jan"
        },
        "$:/language/Date/Short/Month/2": {
            "title": "$:/language/Date/Short/Month/2",
            "text": "Feb"
        },
        "$:/language/Date/Short/Month/3": {
            "title": "$:/language/Date/Short/Month/3",
            "text": "Mar"
        },
        "$:/language/Date/Short/Month/4": {
            "title": "$:/language/Date/Short/Month/4",
            "text": "Apr"
        },
        "$:/language/Date/Short/Month/5": {
            "title": "$:/language/Date/Short/Month/5",
            "text": "May"
        },
        "$:/language/Date/Short/Month/6": {
            "title": "$:/language/Date/Short/Month/6",
            "text": "Jun"
        },
        "$:/language/Date/Short/Month/7": {
            "title": "$:/language/Date/Short/Month/7",
            "text": "Jul"
        },
        "$:/language/Date/Short/Month/8": {
            "title": "$:/language/Date/Short/Month/8",
            "text": "Aug"
        },
        "$:/language/Date/Short/Month/9": {
            "title": "$:/language/Date/Short/Month/9",
            "text": "Sep"
        },
        "$:/language/Date/Short/Month/10": {
            "title": "$:/language/Date/Short/Month/10",
            "text": "Oct"
        },
        "$:/language/Date/Short/Month/11": {
            "title": "$:/language/Date/Short/Month/11",
            "text": "Nov"
        },
        "$:/language/Date/Short/Month/12": {
            "title": "$:/language/Date/Short/Month/12",
            "text": "Dec"
        },
        "$:/language/RelativeDate/Future/Days": {
            "title": "$:/language/RelativeDate/Future/Days",
            "text": "<<period>> days from now"
        },
        "$:/language/RelativeDate/Future/Hours": {
            "title": "$:/language/RelativeDate/Future/Hours",
            "text": "<<period>> hours from now"
        },
        "$:/language/RelativeDate/Future/Minutes": {
            "title": "$:/language/RelativeDate/Future/Minutes",
            "text": "<<period>> minutes from now"
        },
        "$:/language/RelativeDate/Future/Months": {
            "title": "$:/language/RelativeDate/Future/Months",
            "text": "<<period>> months from now"
        },
        "$:/language/RelativeDate/Future/Second": {
            "title": "$:/language/RelativeDate/Future/Second",
            "text": "1 second from now"
        },
        "$:/language/RelativeDate/Future/Seconds": {
            "title": "$:/language/RelativeDate/Future/Seconds",
            "text": "<<period>> seconds from now"
        },
        "$:/language/RelativeDate/Future/Years": {
            "title": "$:/language/RelativeDate/Future/Years",
            "text": "<<period>> years from now"
        },
        "$:/language/RelativeDate/Past/Days": {
            "title": "$:/language/RelativeDate/Past/Days",
            "text": "<<period>> days ago"
        },
        "$:/language/RelativeDate/Past/Hours": {
            "title": "$:/language/RelativeDate/Past/Hours",
            "text": "<<period>> hours ago"
        },
        "$:/language/RelativeDate/Past/Minutes": {
            "title": "$:/language/RelativeDate/Past/Minutes",
            "text": "<<period>> minutes ago"
        },
        "$:/language/RelativeDate/Past/Months": {
            "title": "$:/language/RelativeDate/Past/Months",
            "text": "<<period>> months ago"
        },
        "$:/language/RelativeDate/Past/Second": {
            "title": "$:/language/RelativeDate/Past/Second",
            "text": "1 second ago"
        },
        "$:/language/RelativeDate/Past/Seconds": {
            "title": "$:/language/RelativeDate/Past/Seconds",
            "text": "<<period>> seconds ago"
        },
        "$:/language/RelativeDate/Past/Years": {
            "title": "$:/language/RelativeDate/Past/Years",
            "text": "<<period>> years ago"
        },
        "$:/language/Docs/ModuleTypes/animation": {
            "title": "$:/language/Docs/ModuleTypes/animation",
            "text": "Animations that may be used with the RevealWidget."
        },
        "$:/language/Docs/ModuleTypes/command": {
            "title": "$:/language/Docs/ModuleTypes/command",
            "text": "Commands that can be executed under Node.js."
        },
        "$:/language/Docs/ModuleTypes/config": {
            "title": "$:/language/Docs/ModuleTypes/config",
            "text": "Data to be inserted into `$tw.config`."
        },
        "$:/language/Docs/ModuleTypes/filteroperator": {
            "title": "$:/language/Docs/ModuleTypes/filteroperator",
            "text": "Individual filter operator methods."
        },
        "$:/language/Docs/ModuleTypes/global": {
            "title": "$:/language/Docs/ModuleTypes/global",
            "text": "Global data to be inserted into `$tw`."
        },
        "$:/language/Docs/ModuleTypes/isfilteroperator": {
            "title": "$:/language/Docs/ModuleTypes/isfilteroperator",
            "text": "Operands for the ''is'' filter operator."
        },
        "$:/language/Docs/ModuleTypes/macro": {
            "title": "$:/language/Docs/ModuleTypes/macro",
            "text": "JavaScript macro definitions."
        },
        "$:/language/Docs/ModuleTypes/parser": {
            "title": "$:/language/Docs/ModuleTypes/parser",
            "text": "Parsers for different content types."
        },
        "$:/language/Docs/ModuleTypes/saver": {
            "title": "$:/language/Docs/ModuleTypes/saver",
            "text": "Savers handle different methods for saving files from the browser."
        },
        "$:/language/Docs/ModuleTypes/startup": {
            "title": "$:/language/Docs/ModuleTypes/startup",
            "text": "Startup functions."
        },
        "$:/language/Docs/ModuleTypes/storyview": {
            "title": "$:/language/Docs/ModuleTypes/storyview",
            "text": "Story views customise the animation and behaviour of list widgets."
        },
        "$:/language/Docs/ModuleTypes/tiddlerdeserializer": {
            "title": "$:/language/Docs/ModuleTypes/tiddlerdeserializer",
            "text": "Converts different content types into tiddlers."
        },
        "$:/language/Docs/ModuleTypes/tiddlerfield": {
            "title": "$:/language/Docs/ModuleTypes/tiddlerfield",
            "text": "Defines the behaviour of an individual tiddler field."
        },
        "$:/language/Docs/ModuleTypes/tiddlermethod": {
            "title": "$:/language/Docs/ModuleTypes/tiddlermethod",
            "text": "Adds methods to the `$tw.Tiddler` prototype."
        },
        "$:/language/Docs/ModuleTypes/upgrader": {
            "title": "$:/language/Docs/ModuleTypes/upgrader",
            "text": "Applies upgrade processing to tiddlers during an upgrade/import."
        },
        "$:/language/Docs/ModuleTypes/utils": {
            "title": "$:/language/Docs/ModuleTypes/utils",
            "text": "Adds methods to `$tw.utils`."
        },
        "$:/language/Docs/ModuleTypes/utils-node": {
            "title": "$:/language/Docs/ModuleTypes/utils-node",
            "text": "Adds Node.js-specific methods to `$tw.utils`."
        },
        "$:/language/Docs/ModuleTypes/widget": {
            "title": "$:/language/Docs/ModuleTypes/widget",
            "text": "Widgets encapsulate DOM rendering and refreshing."
        },
        "$:/language/Docs/ModuleTypes/wikimethod": {
            "title": "$:/language/Docs/ModuleTypes/wikimethod",
            "text": "Adds methods to `$tw.Wiki`."
        },
        "$:/language/Docs/ModuleTypes/wikirule": {
            "title": "$:/language/Docs/ModuleTypes/wikirule",
            "text": "Individual parser rules for the main WikiText parser."
        },
        "$:/language/Docs/PaletteColours/alert-background": {
            "title": "$:/language/Docs/PaletteColours/alert-background",
            "text": "Alert background"
        },
        "$:/language/Docs/PaletteColours/alert-border": {
            "title": "$:/language/Docs/PaletteColours/alert-border",
            "text": "Alert border"
        },
        "$:/language/Docs/PaletteColours/alert-highlight": {
            "title": "$:/language/Docs/PaletteColours/alert-highlight",
            "text": "Alert highlight"
        },
        "$:/language/Docs/PaletteColours/alert-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/alert-muted-foreground",
            "text": "Alert muted foreground"
        },
        "$:/language/Docs/PaletteColours/background": {
            "title": "$:/language/Docs/PaletteColours/background",
            "text": "General background"
        },
        "$:/language/Docs/PaletteColours/blockquote-bar": {
            "title": "$:/language/Docs/PaletteColours/blockquote-bar",
            "text": "Blockquote bar"
        },
        "$:/language/Docs/PaletteColours/button-background": {
            "title": "$:/language/Docs/PaletteColours/button-background",
            "text": "Default button background"
        },
        "$:/language/Docs/PaletteColours/button-border": {
            "title": "$:/language/Docs/PaletteColours/button-border",
            "text": "Default button border"
        },
        "$:/language/Docs/PaletteColours/button-foreground": {
            "title": "$:/language/Docs/PaletteColours/button-foreground",
            "text": "Default button foreground"
        },
        "$:/language/Docs/PaletteColours/dirty-indicator": {
            "title": "$:/language/Docs/PaletteColours/dirty-indicator",
            "text": "Unsaved changes indicator"
        },
        "$:/language/Docs/PaletteColours/code-background": {
            "title": "$:/language/Docs/PaletteColours/code-background",
            "text": "Code background"
        },
        "$:/language/Docs/PaletteColours/code-border": {
            "title": "$:/language/Docs/PaletteColours/code-border",
            "text": "Code border"
        },
        "$:/language/Docs/PaletteColours/code-foreground": {
            "title": "$:/language/Docs/PaletteColours/code-foreground",
            "text": "Code foreground"
        },
        "$:/language/Docs/PaletteColours/download-background": {
            "title": "$:/language/Docs/PaletteColours/download-background",
            "text": "Download button background"
        },
        "$:/language/Docs/PaletteColours/download-foreground": {
            "title": "$:/language/Docs/PaletteColours/download-foreground",
            "text": "Download button foreground"
        },
        "$:/language/Docs/PaletteColours/dragger-background": {
            "title": "$:/language/Docs/PaletteColours/dragger-background",
            "text": "Dragger background"
        },
        "$:/language/Docs/PaletteColours/dragger-foreground": {
            "title": "$:/language/Docs/PaletteColours/dragger-foreground",
            "text": "Dragger foreground"
        },
        "$:/language/Docs/PaletteColours/dropdown-background": {
            "title": "$:/language/Docs/PaletteColours/dropdown-background",
            "text": "Dropdown background"
        },
        "$:/language/Docs/PaletteColours/dropdown-border": {
            "title": "$:/language/Docs/PaletteColours/dropdown-border",
            "text": "Dropdown border"
        },
        "$:/language/Docs/PaletteColours/dropdown-tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/dropdown-tab-background-selected",
            "text": "Dropdown tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/dropdown-tab-background": {
            "title": "$:/language/Docs/PaletteColours/dropdown-tab-background",
            "text": "Dropdown tab background"
        },
        "$:/language/Docs/PaletteColours/dropzone-background": {
            "title": "$:/language/Docs/PaletteColours/dropzone-background",
            "text": "Dropzone background"
        },
        "$:/language/Docs/PaletteColours/external-link-background-hover": {
            "title": "$:/language/Docs/PaletteColours/external-link-background-hover",
            "text": "External link background hover"
        },
        "$:/language/Docs/PaletteColours/external-link-background-visited": {
            "title": "$:/language/Docs/PaletteColours/external-link-background-visited",
            "text": "External link background visited"
        },
        "$:/language/Docs/PaletteColours/external-link-background": {
            "title": "$:/language/Docs/PaletteColours/external-link-background",
            "text": "External link background"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground-hover",
            "text": "External link foreground hover"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground-visited": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground-visited",
            "text": "External link foreground visited"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground",
            "text": "External link foreground"
        },
        "$:/language/Docs/PaletteColours/foreground": {
            "title": "$:/language/Docs/PaletteColours/foreground",
            "text": "General foreground"
        },
        "$:/language/Docs/PaletteColours/message-background": {
            "title": "$:/language/Docs/PaletteColours/message-background",
            "text": "Message box background"
        },
        "$:/language/Docs/PaletteColours/message-border": {
            "title": "$:/language/Docs/PaletteColours/message-border",
            "text": "Message box border"
        },
        "$:/language/Docs/PaletteColours/message-foreground": {
            "title": "$:/language/Docs/PaletteColours/message-foreground",
            "text": "Message box foreground"
        },
        "$:/language/Docs/PaletteColours/modal-backdrop": {
            "title": "$:/language/Docs/PaletteColours/modal-backdrop",
            "text": "Modal backdrop"
        },
        "$:/language/Docs/PaletteColours/modal-background": {
            "title": "$:/language/Docs/PaletteColours/modal-background",
            "text": "Modal background"
        },
        "$:/language/Docs/PaletteColours/modal-border": {
            "title": "$:/language/Docs/PaletteColours/modal-border",
            "text": "Modal border"
        },
        "$:/language/Docs/PaletteColours/modal-footer-background": {
            "title": "$:/language/Docs/PaletteColours/modal-footer-background",
            "text": "Modal footer background"
        },
        "$:/language/Docs/PaletteColours/modal-footer-border": {
            "title": "$:/language/Docs/PaletteColours/modal-footer-border",
            "text": "Modal footer border"
        },
        "$:/language/Docs/PaletteColours/modal-header-border": {
            "title": "$:/language/Docs/PaletteColours/modal-header-border",
            "text": "Modal header border"
        },
        "$:/language/Docs/PaletteColours/muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/muted-foreground",
            "text": "General muted foreground"
        },
        "$:/language/Docs/PaletteColours/notification-background": {
            "title": "$:/language/Docs/PaletteColours/notification-background",
            "text": "Notification background"
        },
        "$:/language/Docs/PaletteColours/notification-border": {
            "title": "$:/language/Docs/PaletteColours/notification-border",
            "text": "Notification border"
        },
        "$:/language/Docs/PaletteColours/page-background": {
            "title": "$:/language/Docs/PaletteColours/page-background",
            "text": "Page background"
        },
        "$:/language/Docs/PaletteColours/pre-background": {
            "title": "$:/language/Docs/PaletteColours/pre-background",
            "text": "Preformatted code background"
        },
        "$:/language/Docs/PaletteColours/pre-border": {
            "title": "$:/language/Docs/PaletteColours/pre-border",
            "text": "Preformatted code border"
        },
        "$:/language/Docs/PaletteColours/primary": {
            "title": "$:/language/Docs/PaletteColours/primary",
            "text": "General primary"
        },
        "$:/language/Docs/PaletteColours/sidebar-button-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-button-foreground",
            "text": "Sidebar button foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-controls-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-controls-foreground-hover",
            "text": "Sidebar controls foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-controls-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-controls-foreground",
            "text": "Sidebar controls foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-foreground-shadow": {
            "title": "$:/language/Docs/PaletteColours/sidebar-foreground-shadow",
            "text": "Sidebar foreground shadow"
        },
        "$:/language/Docs/PaletteColours/sidebar-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-foreground",
            "text": "Sidebar foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-muted-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-muted-foreground-hover",
            "text": "Sidebar muted foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-muted-foreground",
            "text": "Sidebar muted foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-background-selected",
            "text": "Sidebar tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-background": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-background",
            "text": "Sidebar tab background"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-border-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-border-selected",
            "text": "Sidebar tab border for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-border": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-border",
            "text": "Sidebar tab border"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-divider": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-divider",
            "text": "Sidebar tab divider"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-foreground-selected",
            "text": "Sidebar tab foreground for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-foreground",
            "text": "Sidebar tab foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground-hover",
            "text": "Sidebar tiddler link foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground",
            "text": "Sidebar tiddler link foreground"
        },
        "$:/language/Docs/PaletteColours/site-title-foreground": {
            "title": "$:/language/Docs/PaletteColours/site-title-foreground",
            "text": "Site title foreground"
        },
        "$:/language/Docs/PaletteColours/static-alert-foreground": {
            "title": "$:/language/Docs/PaletteColours/static-alert-foreground",
            "text": "Static alert foreground"
        },
        "$:/language/Docs/PaletteColours/tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-background-selected",
            "text": "Tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-background": {
            "title": "$:/language/Docs/PaletteColours/tab-background",
            "text": "Tab background"
        },
        "$:/language/Docs/PaletteColours/tab-border-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-border-selected",
            "text": "Tab border for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-border": {
            "title": "$:/language/Docs/PaletteColours/tab-border",
            "text": "Tab border"
        },
        "$:/language/Docs/PaletteColours/tab-divider": {
            "title": "$:/language/Docs/PaletteColours/tab-divider",
            "text": "Tab divider"
        },
        "$:/language/Docs/PaletteColours/tab-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-foreground-selected",
            "text": "Tab foreground for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-foreground": {
            "title": "$:/language/Docs/PaletteColours/tab-foreground",
            "text": "Tab foreground"
        },
        "$:/language/Docs/PaletteColours/table-border": {
            "title": "$:/language/Docs/PaletteColours/table-border",
            "text": "Table border"
        },
        "$:/language/Docs/PaletteColours/table-footer-background": {
            "title": "$:/language/Docs/PaletteColours/table-footer-background",
            "text": "Table footer background"
        },
        "$:/language/Docs/PaletteColours/table-header-background": {
            "title": "$:/language/Docs/PaletteColours/table-header-background",
            "text": "Table header background"
        },
        "$:/language/Docs/PaletteColours/tag-background": {
            "title": "$:/language/Docs/PaletteColours/tag-background",
            "text": "Tag background"
        },
        "$:/language/Docs/PaletteColours/tag-foreground": {
            "title": "$:/language/Docs/PaletteColours/tag-foreground",
            "text": "Tag foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-background",
            "text": "Tiddler background"
        },
        "$:/language/Docs/PaletteColours/tiddler-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-border",
            "text": "Tiddler border"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground-hover",
            "text": "Tiddler controls foreground hover"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground-selected",
            "text": "Tiddler controls foreground for selected controls"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground",
            "text": "Tiddler controls foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-background",
            "text": "Tiddler editor background"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-border-image": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-border-image",
            "text": "Tiddler editor border image"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-border",
            "text": "Tiddler editor border"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-fields-even": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-fields-even",
            "text": "Tiddler editor background for even fields"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-fields-odd": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-fields-odd",
            "text": "Tiddler editor background for odd fields"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-background",
            "text": "Tiddler info panel background"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-border",
            "text": "Tiddler info panel border"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-tab-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-tab-background",
            "text": "Tiddler info panel tab background"
        },
        "$:/language/Docs/PaletteColours/tiddler-link-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-link-background",
            "text": "Tiddler link background"
        },
        "$:/language/Docs/PaletteColours/tiddler-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-link-foreground",
            "text": "Tiddler link foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-subtitle-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-subtitle-foreground",
            "text": "Tiddler subtitle foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-title-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-title-foreground",
            "text": "Tiddler title foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-new-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-new-button",
            "text": "Toolbar 'new tiddler' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-options-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-options-button",
            "text": "Toolbar 'options' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-save-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-save-button",
            "text": "Toolbar 'save' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-info-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-info-button",
            "text": "Toolbar 'info' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-edit-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-edit-button",
            "text": "Toolbar 'edit' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-close-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-close-button",
            "text": "Toolbar 'close' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-delete-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-delete-button",
            "text": "Toolbar 'delete' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-cancel-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-cancel-button",
            "text": "Toolbar 'cancel' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-done-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-done-button",
            "text": "Toolbar 'done' button foreground"
        },
        "$:/language/Docs/PaletteColours/untagged-background": {
            "title": "$:/language/Docs/PaletteColours/untagged-background",
            "text": "Untagged pill background"
        },
        "$:/language/Docs/PaletteColours/very-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/very-muted-foreground",
            "text": "Very muted foreground"
        },
        "$:/language/EditTemplate/Body/External/Hint": {
            "title": "$:/language/EditTemplate/Body/External/Hint",
            "text": "This is an external tiddler stored outside of the main TiddlyWiki file. You can edit the tags and fields but cannot directly edit the content itself"
        },
        "$:/language/EditTemplate/Body/Placeholder": {
            "title": "$:/language/EditTemplate/Body/Placeholder",
            "text": "Type the text for this tiddler"
        },
        "$:/language/EditTemplate/Body/Preview/Type/Output": {
            "title": "$:/language/EditTemplate/Body/Preview/Type/Output",
            "text": "output"
        },
        "$:/language/EditTemplate/Field/Remove/Caption": {
            "title": "$:/language/EditTemplate/Field/Remove/Caption",
            "text": "remove field"
        },
        "$:/language/EditTemplate/Field/Remove/Hint": {
            "title": "$:/language/EditTemplate/Field/Remove/Hint",
            "text": "Remove field"
        },
        "$:/language/EditTemplate/Fields/Add/Button": {
            "title": "$:/language/EditTemplate/Fields/Add/Button",
            "text": "add"
        },
        "$:/language/EditTemplate/Fields/Add/Name/Placeholder": {
            "title": "$:/language/EditTemplate/Fields/Add/Name/Placeholder",
            "text": "field name"
        },
        "$:/language/EditTemplate/Fields/Add/Prompt": {
            "title": "$:/language/EditTemplate/Fields/Add/Prompt",
            "text": "Add a new field:"
        },
        "$:/language/EditTemplate/Fields/Add/Value/Placeholder": {
            "title": "$:/language/EditTemplate/Fields/Add/Value/Placeholder",
            "text": "field value"
        },
        "$:/language/EditTemplate/Fields/Add/Dropdown/System": {
            "title": "$:/language/EditTemplate/Fields/Add/Dropdown/System",
            "text": "System fields"
        },
        "$:/language/EditTemplate/Fields/Add/Dropdown/User": {
            "title": "$:/language/EditTemplate/Fields/Add/Dropdown/User",
            "text": "User fields"
        },
        "$:/language/EditTemplate/Shadow/Warning": {
            "title": "$:/language/EditTemplate/Shadow/Warning",
            "text": "This is a shadow tiddler. Any changes you make will override the default version from the plugin <<pluginLink>>"
        },
        "$:/language/EditTemplate/Shadow/OverriddenWarning": {
            "title": "$:/language/EditTemplate/Shadow/OverriddenWarning",
            "text": "This is a modified shadow tiddler. You can revert to the default version in the plugin <<pluginLink>> by deleting this tiddler"
        },
        "$:/language/EditTemplate/Tags/Add/Button": {
            "title": "$:/language/EditTemplate/Tags/Add/Button",
            "text": "add"
        },
        "$:/language/EditTemplate/Tags/Add/Placeholder": {
            "title": "$:/language/EditTemplate/Tags/Add/Placeholder",
            "text": "tag name"
        },
        "$:/language/EditTemplate/Tags/Dropdown/Caption": {
            "title": "$:/language/EditTemplate/Tags/Dropdown/Caption",
            "text": "tag list"
        },
        "$:/language/EditTemplate/Tags/Dropdown/Hint": {
            "title": "$:/language/EditTemplate/Tags/Dropdown/Hint",
            "text": "Show tag list"
        },
        "$:/language/EditTemplate/Title/BadCharacterWarning": {
            "title": "$:/language/EditTemplate/Title/BadCharacterWarning",
            "text": "Warning: avoid using any of the characters <<bad-chars>> in tiddler titles"
        },
        "$:/language/EditTemplate/Type/Dropdown/Caption": {
            "title": "$:/language/EditTemplate/Type/Dropdown/Caption",
            "text": "content type list"
        },
        "$:/language/EditTemplate/Type/Dropdown/Hint": {
            "title": "$:/language/EditTemplate/Type/Dropdown/Hint",
            "text": "Show content type list"
        },
        "$:/language/EditTemplate/Type/Delete/Caption": {
            "title": "$:/language/EditTemplate/Type/Delete/Caption",
            "text": "delete content type"
        },
        "$:/language/EditTemplate/Type/Delete/Hint": {
            "title": "$:/language/EditTemplate/Type/Delete/Hint",
            "text": "Delete content type"
        },
        "$:/language/EditTemplate/Type/Placeholder": {
            "title": "$:/language/EditTemplate/Type/Placeholder",
            "text": "content type"
        },
        "$:/language/EditTemplate/Type/Prompt": {
            "title": "$:/language/EditTemplate/Type/Prompt",
            "text": "Type:"
        },
        "$:/language/Exporters/StaticRiver": {
            "title": "$:/language/Exporters/StaticRiver",
            "text": "Static HTML"
        },
        "$:/language/Exporters/JsonFile": {
            "title": "$:/language/Exporters/JsonFile",
            "text": "JSON file"
        },
        "$:/language/Exporters/CsvFile": {
            "title": "$:/language/Exporters/CsvFile",
            "text": "CSV file"
        },
        "$:/language/Exporters/TidFile": {
            "title": "$:/language/Exporters/TidFile",
            "text": "\".tid\" file"
        },
        "$:/language/Docs/Fields/_canonical_uri": {
            "title": "$:/language/Docs/Fields/_canonical_uri",
            "text": "The full URI of an external image tiddler"
        },
        "$:/language/Docs/Fields/bag": {
            "title": "$:/language/Docs/Fields/bag",
            "text": "The name of the bag from which a tiddler came"
        },
        "$:/language/Docs/Fields/caption": {
            "title": "$:/language/Docs/Fields/caption",
            "text": "The text to be displayed on a tab or button"
        },
        "$:/language/Docs/Fields/color": {
            "title": "$:/language/Docs/Fields/color",
            "text": "The CSS color value associated with a tiddler"
        },
        "$:/language/Docs/Fields/component": {
            "title": "$:/language/Docs/Fields/component",
            "text": "The name of the component responsible for an [[alert tiddler|AlertMechanism]]"
        },
        "$:/language/Docs/Fields/current-tiddler": {
            "title": "$:/language/Docs/Fields/current-tiddler",
            "text": "Used to cache the top tiddler in a [[history list|HistoryMechanism]]"
        },
        "$:/language/Docs/Fields/created": {
            "title": "$:/language/Docs/Fields/created",
            "text": "The date a tiddler was created"
        },
        "$:/language/Docs/Fields/creator": {
            "title": "$:/language/Docs/Fields/creator",
            "text": "The name of the person who created a tiddler"
        },
        "$:/language/Docs/Fields/dependents": {
            "title": "$:/language/Docs/Fields/dependents",
            "text": "For a plugin, lists the dependent plugin titles"
        },
        "$:/language/Docs/Fields/description": {
            "title": "$:/language/Docs/Fields/description",
            "text": "The descriptive text for a plugin, or a modal dialogue"
        },
        "$:/language/Docs/Fields/draft.of": {
            "title": "$:/language/Docs/Fields/draft.of",
            "text": "For draft tiddlers, contains the title of the tiddler of which this is a draft"
        },
        "$:/language/Docs/Fields/draft.title": {
            "title": "$:/language/Docs/Fields/draft.title",
            "text": "For draft tiddlers, contains the proposed new title of the tiddler"
        },
        "$:/language/Docs/Fields/footer": {
            "title": "$:/language/Docs/Fields/footer",
            "text": "The footer text for a wizard"
        },
        "$:/language/Docs/Fields/hack-to-give-us-something-to-compare-against": {
            "title": "$:/language/Docs/Fields/hack-to-give-us-something-to-compare-against",
            "text": "A temporary storage field used in [[$:/core/templates/static.content]]"
        },
        "$:/language/Docs/Fields/icon": {
            "title": "$:/language/Docs/Fields/icon",
            "text": "The title of the tiddler containing the icon associated with a tiddler"
        },
        "$:/language/Docs/Fields/library": {
            "title": "$:/language/Docs/Fields/library",
            "text": "If set to \"yes\" indicates that a tiddler should be saved as a JavaScript library"
        },
        "$:/language/Docs/Fields/list": {
            "title": "$:/language/Docs/Fields/list",
            "text": "An ordered list of tiddler titles associated with a tiddler"
        },
        "$:/language/Docs/Fields/list-before": {
            "title": "$:/language/Docs/Fields/list-before",
            "text": "If set, the title of a tiddler before which this tiddler should be added to the ordered list of tiddler titles, or at the start of the list if this field is present but empty"
        },
        "$:/language/Docs/Fields/list-after": {
            "title": "$:/language/Docs/Fields/list-after",
            "text": "If set, the title of the tiddler after which this tiddler should be added to the ordered list of tiddler titles"
        },
        "$:/language/Docs/Fields/modified": {
            "title": "$:/language/Docs/Fields/modified",
            "text": "The date and time at which a tiddler was last modified"
        },
        "$:/language/Docs/Fields/modifier": {
            "title": "$:/language/Docs/Fields/modifier",
            "text": "The tiddler title associated with the person who last modified a tiddler"
        },
        "$:/language/Docs/Fields/name": {
            "title": "$:/language/Docs/Fields/name",
            "text": "The human readable name associated with a plugin tiddler"
        },
        "$:/language/Docs/Fields/plugin-priority": {
            "title": "$:/language/Docs/Fields/plugin-priority",
            "text": "A numerical value indicating the priority of a plugin tiddler"
        },
        "$:/language/Docs/Fields/plugin-type": {
            "title": "$:/language/Docs/Fields/plugin-type",
            "text": "The type of plugin in a plugin tiddler"
        },
        "$:/language/Docs/Fields/revision": {
            "title": "$:/language/Docs/Fields/revision",
            "text": "The revision of the tiddler held at the server"
        },
        "$:/language/Docs/Fields/released": {
            "title": "$:/language/Docs/Fields/released",
            "text": "Date of a TiddlyWiki release"
        },
        "$:/language/Docs/Fields/source": {
            "title": "$:/language/Docs/Fields/source",
            "text": "The source URL associated with a tiddler"
        },
        "$:/language/Docs/Fields/subtitle": {
            "title": "$:/language/Docs/Fields/subtitle",
            "text": "The subtitle text for a wizard"
        },
        "$:/language/Docs/Fields/tags": {
            "title": "$:/language/Docs/Fields/tags",
            "text": "A list of tags associated with a tiddler"
        },
        "$:/language/Docs/Fields/text": {
            "title": "$:/language/Docs/Fields/text",
            "text": "The body text of a tiddler"
        },
        "$:/language/Docs/Fields/title": {
            "title": "$:/language/Docs/Fields/title",
            "text": "The unique name of a tiddler"
        },
        "$:/language/Docs/Fields/type": {
            "title": "$:/language/Docs/Fields/type",
            "text": "The content type of a tiddler"
        },
        "$:/language/Docs/Fields/version": {
            "title": "$:/language/Docs/Fields/version",
            "text": "Version information for a plugin"
        },
        "$:/language/Filters/AllTiddlers": {
            "title": "$:/language/Filters/AllTiddlers",
            "text": "All tiddlers except system tiddlers"
        },
        "$:/language/Filters/RecentSystemTiddlers": {
            "title": "$:/language/Filters/RecentSystemTiddlers",
            "text": "Recently modified tiddlers, including system tiddlers"
        },
        "$:/language/Filters/RecentTiddlers": {
            "title": "$:/language/Filters/RecentTiddlers",
            "text": "Recently modified tiddlers"
        },
        "$:/language/Filters/AllTags": {
            "title": "$:/language/Filters/AllTags",
            "text": "All tags except system tags"
        },
        "$:/language/Filters/Missing": {
            "title": "$:/language/Filters/Missing",
            "text": "Missing tiddlers"
        },
        "$:/language/Filters/Drafts": {
            "title": "$:/language/Filters/Drafts",
            "text": "Draft tiddlers"
        },
        "$:/language/Filters/Orphans": {
            "title": "$:/language/Filters/Orphans",
            "text": "Orphan tiddlers"
        },
        "$:/language/Filters/SystemTiddlers": {
            "title": "$:/language/Filters/SystemTiddlers",
            "text": "System tiddlers"
        },
        "$:/language/Filters/ShadowTiddlers": {
            "title": "$:/language/Filters/ShadowTiddlers",
            "text": "Shadow tiddlers"
        },
        "$:/language/Filters/OverriddenShadowTiddlers": {
            "title": "$:/language/Filters/OverriddenShadowTiddlers",
            "text": "Overridden shadow tiddlers"
        },
        "$:/language/Filters/SystemTags": {
            "title": "$:/language/Filters/SystemTags",
            "text": "System tags"
        },
        "$:/language/Filters/TypedTiddlers": {
            "title": "$:/language/Filters/TypedTiddlers",
            "text": "Non wiki-text tiddlers"
        },
        "GettingStarted": {
            "title": "GettingStarted",
            "text": "\\define lingo-base() $:/language/ControlPanel/Basics/\nWelcome to ~TiddlyWiki and the ~TiddlyWiki community\n\nBefore you start storing important information in ~TiddlyWiki it is important to make sure that you can reliably save changes. See http://tiddlywiki.com/#GettingStarted for details\n\n!! Set up this ~TiddlyWiki\n\n<div class=\"tc-control-panel\">\n\n|<$link to=\"$:/SiteTitle\"><<lingo Title/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteTitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/SiteSubtitle\"><<lingo Subtitle/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteSubtitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/DefaultTiddlers\"><<lingo DefaultTiddlers/Prompt>></$link> |<<lingo DefaultTiddlers/TopHint>><br> <$edit tag=\"textarea\" tiddler=\"$:/DefaultTiddlers\"/><br>//<<lingo DefaultTiddlers/BottomHint>>// |\n</div>\n\nSee the [[control panel|$:/ControlPanel]] for more options.\n"
        },
        "$:/language/Help/build": {
            "title": "$:/language/Help/build",
            "description": "Automatically run configured commands",
            "text": "Build the specified build targets for the current wiki. If no build targets are specified then all available targets will be built.\n\n```\n--build <target> [<target> ...]\n```\n\nBuild targets are defined in the `tiddlywiki.info` file of a wiki folder.\n\n"
        },
        "$:/language/Help/clearpassword": {
            "title": "$:/language/Help/clearpassword",
            "description": "Clear a password for subsequent crypto operations",
            "text": "Clear the password for subsequent crypto operations\n\n```\n--clearpassword\n```\n"
        },
        "$:/language/Help/default": {
            "title": "$:/language/Help/default",
            "text": "\\define commandTitle()\n$:/language/Help/$(command)$\n\\end\n```\nusage: tiddlywiki [<wikifolder>] [--<command> [<args>...]...]\n```\n\nAvailable commands:\n\n<ul>\n<$list filter=\"[commands[]sort[title]]\" variable=\"command\">\n<li><$link to=<<commandTitle>>><$macrocall $name=\"command\" $type=\"text/plain\" $output=\"text/plain\"/></$link>: <$transclude tiddler=<<commandTitle>> field=\"description\"/></li>\n</$list>\n</ul>\n\nTo get detailed help on a command:\n\n```\ntiddlywiki --help <command>\n```\n"
        },
        "$:/language/Help/editions": {
            "title": "$:/language/Help/editions",
            "description": "Lists the available editions of TiddlyWiki",
            "text": "Lists the names and descriptions of the available editions. You can create a new wiki of a specified edition with the `--init` command.\n\n```\n--editions\n```\n"
        },
        "$:/language/Help/help": {
            "title": "$:/language/Help/help",
            "description": "Display help for TiddlyWiki commands",
            "text": "Displays help text for a command:\n\n```\n--help [<command>]\n```\n\nIf the command name is omitted then a list of available commands is displayed.\n"
        },
        "$:/language/Help/init": {
            "title": "$:/language/Help/init",
            "description": "Initialise a new wiki folder",
            "text": "Initialise an empty [[WikiFolder|WikiFolders]] with a copy of the specified edition.\n\n```\n--init <edition> [<edition> ...]\n```\n\nFor example:\n\n```\ntiddlywiki ./MyWikiFolder --init empty\n```\n\nNote:\n\n* The wiki folder directory will be created if necessary\n* The \"edition\" defaults to ''empty''\n* The init command will fail if the wiki folder is not empty\n* The init command removes any `includeWikis` definitions in the edition's `tiddlywiki.info` file\n* When multiple editions are specified, editions initialised later will overwrite any files shared with earlier editions (so, the final `tiddlywiki.info` file will be copied from the last edition)\n* `--editions` returns a list of available editions\n"
        },
        "$:/language/Help/load": {
            "title": "$:/language/Help/load",
            "description": "Load tiddlers from a file",
            "text": "Load tiddlers from 2.x.x TiddlyWiki files (`.html`), `.tiddler`, `.tid`, `.json` or other files\n\n```\n--load <filepath>\n```\n\nTo load tiddlers from an encrypted TiddlyWiki file you should first specify the password with the PasswordCommand. For example:\n\n```\ntiddlywiki ./MyWiki --password pa55w0rd --load my_encrypted_wiki.html\n```\n\nNote that TiddlyWiki will not load an older version of an already loaded plugin.\n"
        },
        "$:/language/Help/makelibrary": {
            "title": "$:/language/Help/makelibrary",
            "description": "Construct library plugin required by upgrade process",
            "text": "Constructs the `$:/UpgradeLibrary` tiddler for the upgrade process.\n\nThe upgrade library is formatted as an ordinary plugin tiddler with the plugin type `library`. It contains a copy of each of the plugins, themes and language packs available within the TiddlyWiki5 repository.\n\nThis command is intended for internal use; it is only relevant to users constructing a custom upgrade procedure.\n\n```\n--makelibrary <title>\n```\n\nThe title argument defaults to `$:/UpgradeLibrary`.\n"
        },
        "$:/language/Help/notfound": {
            "title": "$:/language/Help/notfound",
            "text": "No such help item"
        },
        "$:/language/Help/output": {
            "title": "$:/language/Help/output",
            "description": "Set the base output directory for subsequent commands",
            "text": "Sets the base output directory for subsequent commands. The default output directory is the `output` subdirectory of the edition directory.\n\n```\n--output <pathname>\n```\n\nIf the specified pathname is relative then it is resolved relative to the current working directory. For example `--output .` sets the output directory to the current working directory.\n\n"
        },
        "$:/language/Help/password": {
            "title": "$:/language/Help/password",
            "description": "Set a password for subsequent crypto operations",
            "text": "Set a password for subsequent crypto operations\n\n```\n--password <password>\n```\n\n''Note'': This should not be used for serving TiddlyWiki with password protection. Instead, see the password option under the [[ServerCommand]].\n"
        },
        "$:/language/Help/rendertiddler": {
            "title": "$:/language/Help/rendertiddler",
            "description": "Render an individual tiddler as a specified ContentType",
            "text": "Render an individual tiddler as a specified ContentType, defaulting to `text/html` and save it to the specified filename. Optionally a template can be specified, in which case the template tiddler is rendered with the \"currentTiddler\" variable set to the tiddler that is being rendered (the first parameter value).\n\n```\n--rendertiddler <title> <filename> [<type>] [<template>]\n```\n\nBy default, the filename is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny missing directories in the path to the filename are automatically created.\n"
        },
        "$:/language/Help/rendertiddlers": {
            "title": "$:/language/Help/rendertiddlers",
            "description": "Render tiddlers matching a filter to a specified ContentType",
            "text": "Render a set of tiddlers matching a filter to separate files of a specified ContentType (defaults to `text/html`) and extension (defaults to `.html`).\n\n```\n--rendertiddlers <filter> <template> <pathname> [<type>] [<extension>] [\"noclean\"]\n```\n\nFor example:\n\n```\n--rendertiddlers [!is[system]] $:/core/templates/static.tiddler.html ./static text/plain\n```\n\nBy default, the pathname is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny files in the target directory are deleted unless the ''noclean'' flag is specified. The target directory is recursively created if it is missing.\n"
        },
        "$:/language/Help/savetiddler": {
            "title": "$:/language/Help/savetiddler",
            "description": "Saves a raw tiddler to a file",
            "text": "Saves an individual tiddler in its raw text or binary format to the specified filename.\n\n```\n--savetiddler <title> <filename>\n```\n\nBy default, the filename is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny missing directories in the path to the filename are automatically created.\n"
        },
        "$:/language/Help/savetiddlers": {
            "title": "$:/language/Help/savetiddlers",
            "description": "Saves a group of raw tiddlers to a directory",
            "text": "Saves a group of tiddlers in their raw text or binary format to the specified directory.\n\n```\n--savetiddlers <filter> <pathname> [\"noclean\"]\n```\n\nBy default, the pathname is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nThe output directory is cleared of existing files before saving the specified files. The deletion can be disabled by specifying the ''noclean'' flag.\n\nAny missing directories in the pathname are automatically created.\n"
        },
        "$:/language/Help/server": {
            "title": "$:/language/Help/server",
            "description": "Provides an HTTP server interface to TiddlyWiki",
            "text": "The server built in to TiddlyWiki5 is very simple. Although compatible with TiddlyWeb it doesn't support many of the features needed for robust Internet-facing usage.\n\nAt the root, it serves a rendering of a specified tiddler. Away from the root, it serves individual tiddlers encoded in JSON, and supports the basic HTTP operations for `GET`, `PUT` and `DELETE`.\n\n```\n--server <port> <roottiddler> <rendertype> <servetype> <username> <password> <host> <pathprefix>\n```\n\nThe parameters are:\n\n* ''port'' - port number to serve from (defaults to \"8080\")\n* ''roottiddler'' - the tiddler to serve at the root (defaults to \"$:/core/save/all\")\n* ''rendertype'' - the content type to which the root tiddler should be rendered (defaults to \"text/plain\")\n* ''servetype'' - the content type with which the root tiddler should be served (defaults to \"text/html\")\n* ''username'' - the default username for signing edits\n* ''password'' - optional password for basic authentication\n* ''host'' - optional hostname to serve from (defaults to \"127.0.0.1\" aka \"localhost\")\n* ''pathprefix'' - optional prefix for paths\n\nIf the password parameter is specified then the browser will prompt the user for the username and password. Note that the password is transmitted in plain text so this implementation isn't suitable for general use.\n\nFor example:\n\n```\n--server 8080 $:/core/save/all text/plain text/html MyUserName passw0rd\n```\n\nThe username and password can be specified as empty strings if you need to set the hostname or pathprefix and don't want to require a password:\n\n```\n--server 8080 $:/core/save/all text/plain text/html \"\" \"\" 192.168.0.245\n```\n\nTo run multiple TiddlyWiki servers at the same time you'll need to put each one on a different port.\n"
        },
        "$:/language/Help/setfield": {
            "title": "$:/language/Help/setfield",
            "description": "Prepares external tiddlers for use",
            "text": "//Note that this command is experimental and may change or be replaced before being finalised//\n\nSets the specified field of a group of tiddlers to the result of wikifying a template tiddler with the `currentTiddler` variable set to the tiddler.\n\n```\n--setfield <filter> <fieldname> <templatetitle> <rendertype>\n```\n\nThe parameters are:\n\n* ''filter'' - filter identifying the tiddlers to be affected\n* ''fieldname'' - the field to modify (defaults to \"text\")\n* ''templatetitle'' - the tiddler to wikify into the specified field. If blank or missing then the specified field is deleted\n* ''rendertype'' - the text type to render (defaults to \"text/plain\"; \"text/html\" can be used to include HTML tags)\n"
        },
        "$:/language/Help/unpackplugin": {
            "title": "$:/language/Help/unpackplugin",
            "description": "Unpack the payload tiddlers from a plugin",
            "text": "Extract the payload tiddlers from a plugin, creating them as ordinary tiddlers:\n\n```\n--unpackplugin <title>\n```\n"
        },
        "$:/language/Help/verbose": {
            "title": "$:/language/Help/verbose",
            "description": "Triggers verbose output mode",
            "text": "Triggers verbose output, useful for debugging\n\n```\n--verbose\n```\n"
        },
        "$:/language/Help/version": {
            "title": "$:/language/Help/version",
            "description": "Displays the version number of TiddlyWiki",
            "text": "Displays the version number of TiddlyWiki.\n\n```\n--version\n```\n"
        },
        "$:/languages/en-GB/icon": {
            "title": "$:/languages/en-GB/icon",
            "type": "image/svg+xml",
            "text": "<svg xmlns=\"http://www.w3.org/2000/svg\" viewBox=\"0 0 60 30\" width=\"1200\" height=\"600\">\n<clipPath id=\"t\">\n\t<path d=\"M30,15 h30 v15 z v15 h-30 z h-30 v-15 z v-15 h30 z\"/>\n</clipPath>\n<path d=\"M0,0 v30 h60 v-30 z\" fill=\"#00247d\"/>\n<path d=\"M0,0 L60,30 M60,0 L0,30\" stroke=\"#fff\" stroke-width=\"6\"/>\n<path d=\"M0,0 L60,30 M60,0 L0,30\" clip-path=\"url(#t)\" stroke=\"#cf142b\" stroke-width=\"4\"/>\n<path d=\"M30,0 v30 M0,15 h60\" stroke=\"#fff\" stroke-width=\"10\"/>\n<path d=\"M30,0 v30 M0,15 h60\" stroke=\"#cf142b\" stroke-width=\"6\"/>\n</svg>\n"
        },
        "$:/language/Import/Imported/Hint": {
            "title": "$:/language/Import/Imported/Hint",
            "text": "The following tiddlers were imported:"
        },
        "$:/language/Import/Listing/Cancel/Caption": {
            "title": "$:/language/Import/Listing/Cancel/Caption",
            "text": "Cancel"
        },
        "$:/language/Import/Listing/Hint": {
            "title": "$:/language/Import/Listing/Hint",
            "text": "These tiddlers are ready to import:"
        },
        "$:/language/Import/Listing/Import/Caption": {
            "title": "$:/language/Import/Listing/Import/Caption",
            "text": "Import"
        },
        "$:/language/Import/Listing/Select/Caption": {
            "title": "$:/language/Import/Listing/Select/Caption",
            "text": "Select"
        },
        "$:/language/Import/Listing/Status/Caption": {
            "title": "$:/language/Import/Listing/Status/Caption",
            "text": "Status"
        },
        "$:/language/Import/Listing/Title/Caption": {
            "title": "$:/language/Import/Listing/Title/Caption",
            "text": "Title"
        },
        "$:/language/Import/Upgrader/Plugins/Suppressed/Incompatible": {
            "title": "$:/language/Import/Upgrader/Plugins/Suppressed/Incompatible",
            "text": "Blocked incompatible or obsolete plugin"
        },
        "$:/language/Import/Upgrader/Plugins/Suppressed/Version": {
            "title": "$:/language/Import/Upgrader/Plugins/Suppressed/Version",
            "text": "Blocked plugin (due to incoming <<incoming>> being older than existing <<existing>>)"
        },
        "$:/language/Import/Upgrader/Plugins/Upgraded": {
            "title": "$:/language/Import/Upgrader/Plugins/Upgraded",
            "text": "Upgraded plugin from <<incoming>> to <<upgraded>>"
        },
        "$:/language/Import/Upgrader/State/Suppressed": {
            "title": "$:/language/Import/Upgrader/State/Suppressed",
            "text": "Blocked temporary state tiddler"
        },
        "$:/language/Import/Upgrader/System/Suppressed": {
            "title": "$:/language/Import/Upgrader/System/Suppressed",
            "text": "Blocked system tiddler"
        },
        "$:/language/Import/Upgrader/ThemeTweaks/Created": {
            "title": "$:/language/Import/Upgrader/ThemeTweaks/Created",
            "text": "Migrated theme tweak from <$text text=<<from>>/>"
        },
        "$:/language/AboveStory/ClassicPlugin/Warning": {
            "title": "$:/language/AboveStory/ClassicPlugin/Warning",
            "text": "It looks like you are trying to load a plugin designed for ~TiddlyWiki Classic. Please note that [[these plugins do not work with TiddlyWiki version 5.x.x|http://tiddlywiki.com/#TiddlyWikiClassic]]. ~TiddlyWiki Classic plugins detected:"
        },
        "$:/language/BinaryWarning/Prompt": {
            "title": "$:/language/BinaryWarning/Prompt",
            "text": "This tiddler contains binary data"
        },
        "$:/language/ClassicWarning/Hint": {
            "title": "$:/language/ClassicWarning/Hint",
            "text": "This tiddler is written in TiddlyWiki Classic wiki text format, which is not fully compatible with TiddlyWiki version 5. See http://tiddlywiki.com/static/Upgrading.html for more details."
        },
        "$:/language/ClassicWarning/Upgrade/Caption": {
            "title": "$:/language/ClassicWarning/Upgrade/Caption",
            "text": "upgrade"
        },
        "$:/language/CloseAll/Button": {
            "title": "$:/language/CloseAll/Button",
            "text": "close all"
        },
        "$:/language/ColourPicker/Recent": {
            "title": "$:/language/ColourPicker/Recent",
            "text": "Recent:"
        },
        "$:/language/ConfirmCancelTiddler": {
            "title": "$:/language/ConfirmCancelTiddler",
            "text": "Do you wish to discard changes to the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmDeleteTiddler": {
            "title": "$:/language/ConfirmDeleteTiddler",
            "text": "Do you wish to delete the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmOverwriteTiddler": {
            "title": "$:/language/ConfirmOverwriteTiddler",
            "text": "Do you wish to overwrite the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmEditShadowTiddler": {
            "title": "$:/language/ConfirmEditShadowTiddler",
            "text": "You are about to edit a ShadowTiddler. Any changes will override the default system making future upgrades non-trivial. Are you sure you want to edit \"<$text text=<<title>>/>\"?"
        },
        "$:/language/Count": {
            "title": "$:/language/Count",
            "text": "count"
        },
        "$:/language/DefaultNewTiddlerTitle": {
            "title": "$:/language/DefaultNewTiddlerTitle",
            "text": "New Tiddler"
        },
        "$:/language/DropMessage": {
            "title": "$:/language/DropMessage",
            "text": "Drop here (or use the 'Escape' key to cancel)"
        },
        "$:/language/Encryption/Cancel": {
            "title": "$:/language/Encryption/Cancel",
            "text": "Cancel"
        },
        "$:/language/Encryption/ConfirmClearPassword": {
            "title": "$:/language/Encryption/ConfirmClearPassword",
            "text": "Do you wish to clear the password? This will remove the encryption applied when saving this wiki"
        },
        "$:/language/Encryption/PromptSetPassword": {
            "title": "$:/language/Encryption/PromptSetPassword",
            "text": "Set a new password for this TiddlyWiki"
        },
        "$:/language/Encryption/Username": {
            "title": "$:/language/Encryption/Username",
            "text": "Username"
        },
        "$:/language/Encryption/Password": {
            "title": "$:/language/Encryption/Password",
            "text": "Password"
        },
        "$:/language/Encryption/RepeatPassword": {
            "title": "$:/language/Encryption/RepeatPassword",
            "text": "Repeat password"
        },
        "$:/language/Encryption/PasswordNoMatch": {
            "title": "$:/language/Encryption/PasswordNoMatch",
            "text": "Passwords do not match"
        },
        "$:/language/Encryption/SetPassword": {
            "title": "$:/language/Encryption/SetPassword",
            "text": "Set password"
        },
        "$:/language/Error/Caption": {
            "title": "$:/language/Error/Caption",
            "text": "Error"
        },
        "$:/language/Error/Filter": {
            "title": "$:/language/Error/Filter",
            "text": "Filter error"
        },
        "$:/language/Error/FilterSyntax": {
            "title": "$:/language/Error/FilterSyntax",
            "text": "Syntax error in filter expression"
        },
        "$:/language/Error/IsFilterOperator": {
            "title": "$:/language/Error/IsFilterOperator",
            "text": "Filter Error: Unknown operand for the 'is' filter operator"
        },
        "$:/language/Error/LoadingPluginLibrary": {
            "title": "$:/language/Error/LoadingPluginLibrary",
            "text": "Error loading plugin library"
        },
        "$:/language/Error/RecursiveTransclusion": {
            "title": "$:/language/Error/RecursiveTransclusion",
            "text": "Recursive transclusion error in transclude widget"
        },
        "$:/language/Error/RetrievingSkinny": {
            "title": "$:/language/Error/RetrievingSkinny",
            "text": "Error retrieving skinny tiddler list"
        },
        "$:/language/Error/SavingToTWEdit": {
            "title": "$:/language/Error/SavingToTWEdit",
            "text": "Error saving to TWEdit"
        },
        "$:/language/Error/WhileSaving": {
            "title": "$:/language/Error/WhileSaving",
            "text": "Error while saving"
        },
        "$:/language/Error/XMLHttpRequest": {
            "title": "$:/language/Error/XMLHttpRequest",
            "text": "XMLHttpRequest error code"
        },
        "$:/language/InternalJavaScriptError/Title": {
            "title": "$:/language/InternalJavaScriptError/Title",
            "text": "Internal JavaScript Error"
        },
        "$:/language/InternalJavaScriptError/Hint": {
            "title": "$:/language/InternalJavaScriptError/Hint",
            "text": "Well, this is embarrassing. It is recommended that you restart TiddlyWiki by refreshing your browser"
        },
        "$:/language/InvalidFieldName": {
            "title": "$:/language/InvalidFieldName",
            "text": "Illegal characters in field name \"<$text text=<<fieldName>>/>\". Fields can only contain lowercase letters, digits and the characters underscore (`_`), hyphen (`-`) and period (`.`)"
        },
        "$:/language/LazyLoadingWarning": {
            "title": "$:/language/LazyLoadingWarning",
            "text": "<p>Loading external text from ''<$text text={{!!_canonical_uri}}/>''</p><p>If this message doesn't disappear you may be using a browser that doesn't support external text in this configuration. See http://tiddlywiki.com/#ExternalText</p>"
        },
        "$:/language/LoginToTiddlySpace": {
            "title": "$:/language/LoginToTiddlySpace",
            "text": "Login to TiddlySpace"
        },
        "$:/language/MissingTiddler/Hint": {
            "title": "$:/language/MissingTiddler/Hint",
            "text": "Missing tiddler \"<$text text=<<currentTiddler>>/>\" - click {{$:/core/images/edit-button}} to create"
        },
        "$:/language/No": {
            "title": "$:/language/No",
            "text": "No"
        },
        "$:/language/OfficialPluginLibrary": {
            "title": "$:/language/OfficialPluginLibrary",
            "text": "Official ~TiddlyWiki Plugin Library"
        },
        "$:/language/OfficialPluginLibrary/Hint": {
            "title": "$:/language/OfficialPluginLibrary/Hint",
            "text": "The official ~TiddlyWiki plugin library at tiddlywiki.com. Plugins, themes and language packs are maintained by the core team."
        },
        "$:/language/PluginReloadWarning": {
            "title": "$:/language/PluginReloadWarning",
            "text": "Please save {{$:/core/ui/Buttons/save-wiki}} and reload {{$:/core/ui/Buttons/refresh}} to allow changes to plugins to take effect"
        },
        "$:/language/RecentChanges/DateFormat": {
            "title": "$:/language/RecentChanges/DateFormat",
            "text": "DDth MMM YYYY"
        },
        "$:/language/SystemTiddler/Tooltip": {
            "title": "$:/language/SystemTiddler/Tooltip",
            "text": "This is a system tiddler"
        },
        "$:/language/TagManager/Colour/Heading": {
            "title": "$:/language/TagManager/Colour/Heading",
            "text": "Colour"
        },
        "$:/language/TagManager/Count/Heading": {
            "title": "$:/language/TagManager/Count/Heading",
            "text": "Count"
        },
        "$:/language/TagManager/Icon/Heading": {
            "title": "$:/language/TagManager/Icon/Heading",
            "text": "Icon"
        },
        "$:/language/TagManager/Info/Heading": {
            "title": "$:/language/TagManager/Info/Heading",
            "text": "Info"
        },
        "$:/language/TagManager/Tag/Heading": {
            "title": "$:/language/TagManager/Tag/Heading",
            "text": "Tag"
        },
        "$:/language/Tiddler/DateFormat": {
            "title": "$:/language/Tiddler/DateFormat",
            "text": "DDth MMM YYYY at hh12:0mmam"
        },
        "$:/language/UnsavedChangesWarning": {
            "title": "$:/language/UnsavedChangesWarning",
            "text": "You have unsaved changes in TiddlyWiki"
        },
        "$:/language/Yes": {
            "title": "$:/language/Yes",
            "text": "Yes"
        },
        "$:/language/Modals/Download": {
            "title": "$:/language/Modals/Download",
            "type": "text/vnd.tiddlywiki",
            "subtitle": "Download changes",
            "footer": "<$button message=\"tm-close-tiddler\">Close</$button>",
            "help": "http://tiddlywiki.com/static/DownloadingChanges.html",
            "text": "Your browser only supports manual saving.\n\nTo save your modified wiki, right click on the download link below and select \"Download file\" or \"Save file\", and then choose the folder and filename.\n\n//You can marginally speed things up by clicking the link with the control key (Windows) or the options/alt key (Mac OS X). You will not be prompted for the folder or filename, but your browser is likely to give it an unrecognisable name -- you may need to rename the file to include an `.html` extension before you can do anything useful with it.//\n\nOn smartphones that do not allow files to be downloaded you can instead bookmark the link, and then sync your bookmarks to a desktop computer from where the wiki can be saved normally.\n"
        },
        "$:/language/Modals/SaveInstructions": {
            "title": "$:/language/Modals/SaveInstructions",
            "type": "text/vnd.tiddlywiki",
            "subtitle": "Save your work",
            "footer": "<$button message=\"tm-close-tiddler\">Close</$button>",
            "help": "http://tiddlywiki.com/static/SavingChanges.html",
            "text": "Your changes to this wiki need to be saved as a ~TiddlyWiki HTML file.\n\n!!! Desktop browsers\n\n# Select ''Save As'' from the ''File'' menu\n# Choose a filename and location\n#* Some browsers also require you to explicitly specify the file saving format as ''Webpage, HTML only'' or similar\n# Close this tab\n\n!!! Smartphone browsers\n\n# Create a bookmark to this page\n#* If you've got iCloud or Google Sync set up then the bookmark will automatically sync to your desktop where you can open it and save it as above\n# Close this tab\n\n//If you open the bookmark again in Mobile Safari you will see this message again. If you want to go ahead and use the file, just click the ''close'' button below//\n"
        },
        "$:/config/NewJournal/Title": {
            "title": "$:/config/NewJournal/Title",
            "text": "DDth MMM YYYY"
        },
        "$:/config/NewJournal/Tags": {
            "title": "$:/config/NewJournal/Tags",
            "text": "Journal"
        },
        "$:/language/Notifications/Save/Done": {
            "title": "$:/language/Notifications/Save/Done",
            "text": "Saved wiki"
        },
        "$:/language/Notifications/Save/Starting": {
            "title": "$:/language/Notifications/Save/Starting",
            "text": "Starting to save wiki"
        },
        "$:/language/Search/DefaultResults/Caption": {
            "title": "$:/language/Search/DefaultResults/Caption",
            "text": "List"
        },
        "$:/language/Search/Filter/Caption": {
            "title": "$:/language/Search/Filter/Caption",
            "text": "Filter"
        },
        "$:/language/Search/Filter/Hint": {
            "title": "$:/language/Search/Filter/Hint",
            "text": "Search via a [[filter expression|http://tiddlywiki.com/static/Filters.html]]"
        },
        "$:/language/Search/Filter/Matches": {
            "title": "$:/language/Search/Filter/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Matches": {
            "title": "$:/language/Search/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Matches/All": {
            "title": "$:/language/Search/Matches/All",
            "text": "All matches:"
        },
        "$:/language/Search/Matches/Title": {
            "title": "$:/language/Search/Matches/Title",
            "text": "Title matches:"
        },
        "$:/language/Search/Search": {
            "title": "$:/language/Search/Search",
            "text": "Search"
        },
        "$:/language/Search/Shadows/Caption": {
            "title": "$:/language/Search/Shadows/Caption",
            "text": "Shadows"
        },
        "$:/language/Search/Shadows/Hint": {
            "title": "$:/language/Search/Shadows/Hint",
            "text": "Search for shadow tiddlers"
        },
        "$:/language/Search/Shadows/Matches": {
            "title": "$:/language/Search/Shadows/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Standard/Caption": {
            "title": "$:/language/Search/Standard/Caption",
            "text": "Standard"
        },
        "$:/language/Search/Standard/Hint": {
            "title": "$:/language/Search/Standard/Hint",
            "text": "Search for standard tiddlers"
        },
        "$:/language/Search/Standard/Matches": {
            "title": "$:/language/Search/Standard/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/System/Caption": {
            "title": "$:/language/Search/System/Caption",
            "text": "System"
        },
        "$:/language/Search/System/Hint": {
            "title": "$:/language/Search/System/Hint",
            "text": "Search for system tiddlers"
        },
        "$:/language/Search/System/Matches": {
            "title": "$:/language/Search/System/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/SideBar/All/Caption": {
            "title": "$:/language/SideBar/All/Caption",
            "text": "All"
        },
        "$:/language/SideBar/Contents/Caption": {
            "title": "$:/language/SideBar/Contents/Caption",
            "text": "Contents"
        },
        "$:/language/SideBar/Drafts/Caption": {
            "title": "$:/language/SideBar/Drafts/Caption",
            "text": "Drafts"
        },
        "$:/language/SideBar/Missing/Caption": {
            "title": "$:/language/SideBar/Missing/Caption",
            "text": "Missing"
        },
        "$:/language/SideBar/More/Caption": {
            "title": "$:/language/SideBar/More/Caption",
            "text": "More"
        },
        "$:/language/SideBar/Open/Caption": {
            "title": "$:/language/SideBar/Open/Caption",
            "text": "Open"
        },
        "$:/language/SideBar/Orphans/Caption": {
            "title": "$:/language/SideBar/Orphans/Caption",
            "text": "Orphans"
        },
        "$:/language/SideBar/Recent/Caption": {
            "title": "$:/language/SideBar/Recent/Caption",
            "text": "Recent"
        },
        "$:/language/SideBar/Shadows/Caption": {
            "title": "$:/language/SideBar/Shadows/Caption",
            "text": "Shadows"
        },
        "$:/language/SideBar/System/Caption": {
            "title": "$:/language/SideBar/System/Caption",
            "text": "System"
        },
        "$:/language/SideBar/Tags/Caption": {
            "title": "$:/language/SideBar/Tags/Caption",
            "text": "Tags"
        },
        "$:/language/SideBar/Tags/Untagged/Caption": {
            "title": "$:/language/SideBar/Tags/Untagged/Caption",
            "text": "untagged"
        },
        "$:/language/SideBar/Tools/Caption": {
            "title": "$:/language/SideBar/Tools/Caption",
            "text": "Tools"
        },
        "$:/language/SideBar/Types/Caption": {
            "title": "$:/language/SideBar/Types/Caption",
            "text": "Types"
        },
        "$:/SiteSubtitle": {
            "title": "$:/SiteSubtitle",
            "text": "a non-linear personal web notebook"
        },
        "$:/SiteTitle": {
            "title": "$:/SiteTitle",
            "text": "My ~TiddlyWiki"
        },
        "$:/language/Snippets/ListByTag": {
            "title": "$:/language/Snippets/ListByTag",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "List of tiddlers by tag",
            "text": "<<list-links \"[tag[task]sort[title]]\">>\n"
        },
        "$:/language/Snippets/MacroDefinition": {
            "title": "$:/language/Snippets/MacroDefinition",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Macro definition",
            "text": "\\define macroName(param1:\"default value\",param2)\nText of the macro\n\\end\n"
        },
        "$:/language/Snippets/Table4x3": {
            "title": "$:/language/Snippets/Table4x3",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Table with 4 columns by 3 rows",
            "text": "|! |!Alpha |!Beta |!Gamma |!Delta |\n|!One | | | | |\n|!Two | | | | |\n|!Three | | | | |\n"
        },
        "$:/language/Snippets/TableOfContents": {
            "title": "$:/language/Snippets/TableOfContents",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Table of Contents",
            "text": "<div class=\"tc-table-of-contents\">\n\n<<toc-selective-expandable 'TableOfContents'>>\n\n</div>"
        },
        "$:/language/ThemeTweaks/ThemeTweaks": {
            "title": "$:/language/ThemeTweaks/ThemeTweaks",
            "text": "Theme Tweaks"
        },
        "$:/language/ThemeTweaks/ThemeTweaks/Hint": {
            "title": "$:/language/ThemeTweaks/ThemeTweaks/Hint",
            "text": "You can tweak certain aspects of the ''Vanilla'' theme."
        },
        "$:/language/ThemeTweaks/Options": {
            "title": "$:/language/ThemeTweaks/Options",
            "text": "Options"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout",
            "text": "Sidebar layout"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout/Fixed-Fluid": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout/Fixed-Fluid",
            "text": "Fixed story, fluid sidebar"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout/Fluid-Fixed": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout/Fluid-Fixed",
            "text": "Fluid story, fixed sidebar"
        },
        "$:/language/ThemeTweaks/Options/StickyTitles": {
            "title": "$:/language/ThemeTweaks/Options/StickyTitles",
            "text": "Sticky titles"
        },
        "$:/language/ThemeTweaks/Options/StickyTitles/Hint": {
            "title": "$:/language/ThemeTweaks/Options/StickyTitles/Hint",
            "text": "Causes tiddler titles to \"stick\" to the top of the browser window. Caution: Does not work at all with Chrome, and causes some layout issues in Firefox"
        },
        "$:/language/ThemeTweaks/Options/CodeWrapping": {
            "title": "$:/language/ThemeTweaks/Options/CodeWrapping",
            "text": "Wrap long lines in code blocks"
        },
        "$:/language/ThemeTweaks/Settings": {
            "title": "$:/language/ThemeTweaks/Settings",
            "text": "Settings"
        },
        "$:/language/ThemeTweaks/Settings/FontFamily": {
            "title": "$:/language/ThemeTweaks/Settings/FontFamily",
            "text": "Font family"
        },
        "$:/language/ThemeTweaks/Settings/CodeFontFamily": {
            "title": "$:/language/ThemeTweaks/Settings/CodeFontFamily",
            "text": "Code font family"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImage": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImage",
            "text": "Page background image"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment",
            "text": "Page background image attachment"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Scroll": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Scroll",
            "text": "Scroll with tiddlers"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Fixed": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Fixed",
            "text": "Fixed to window"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize",
            "text": "Page background image size"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Auto": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Auto",
            "text": "Auto"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Cover": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Cover",
            "text": "Cover"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Contain": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Contain",
            "text": "Contain"
        },
        "$:/language/ThemeTweaks/Metrics": {
            "title": "$:/language/ThemeTweaks/Metrics",
            "text": "Sizes"
        },
        "$:/language/ThemeTweaks/Metrics/FontSize": {
            "title": "$:/language/ThemeTweaks/Metrics/FontSize",
            "text": "Font size"
        },
        "$:/language/ThemeTweaks/Metrics/LineHeight": {
            "title": "$:/language/ThemeTweaks/Metrics/LineHeight",
            "text": "Line height"
        },
        "$:/language/ThemeTweaks/Metrics/BodyFontSize": {
            "title": "$:/language/ThemeTweaks/Metrics/BodyFontSize",
            "text": "Font size for tiddler body"
        },
        "$:/language/ThemeTweaks/Metrics/BodyLineHeight": {
            "title": "$:/language/ThemeTweaks/Metrics/BodyLineHeight",
            "text": "Line height for tiddler body"
        },
        "$:/language/ThemeTweaks/Metrics/StoryLeft": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryLeft",
            "text": "Story left position"
        },
        "$:/language/ThemeTweaks/Metrics/StoryLeft/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryLeft/Hint",
            "text": "how far the left margin of the story river<br>(tiddler area) is from the left of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryTop": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryTop",
            "text": "Story top position"
        },
        "$:/language/ThemeTweaks/Metrics/StoryTop/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryTop/Hint",
            "text": "how far the top margin of the story river<br>is from the top of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryRight": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryRight",
            "text": "Story right"
        },
        "$:/language/ThemeTweaks/Metrics/StoryRight/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryRight/Hint",
            "text": "how far the left margin of the sidebar <br>is from the left of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryWidth",
            "text": "Story width"
        },
        "$:/language/ThemeTweaks/Metrics/StoryWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryWidth/Hint",
            "text": "the overall width of the story river"
        },
        "$:/language/ThemeTweaks/Metrics/TiddlerWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/TiddlerWidth",
            "text": "Tiddler width"
        },
        "$:/language/ThemeTweaks/Metrics/TiddlerWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/TiddlerWidth/Hint",
            "text": "within the story river"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint",
            "text": "Sidebar breakpoint"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint/Hint",
            "text": "the minimum page width at which the story<br>river and sidebar will appear side by side"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarWidth",
            "text": "Sidebar width"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarWidth/Hint",
            "text": "the width of the sidebar in fluid-fixed layout"
        },
        "$:/language/TiddlerInfo/Advanced/Caption": {
            "title": "$:/language/TiddlerInfo/Advanced/Caption",
            "text": "Advanced"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Empty/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Empty/Hint",
            "text": "none"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Heading": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Heading",
            "text": "Plugin Details"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Hint",
            "text": "This plugin contains the following shadow tiddlers:"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Heading": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Heading",
            "text": "Shadow Status"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/NotShadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/NotShadow/Hint",
            "text": "The tiddler <$link to=<<infoTiddler>>><$text text=<<infoTiddler>>/></$link> is not a shadow tiddler"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Hint",
            "text": "The tiddler <$link to=<<infoTiddler>>><$text text=<<infoTiddler>>/></$link> is a shadow tiddler"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Source": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Source",
            "text": "It is defined in the plugin <$link to=<<pluginTiddler>>><$text text=<<pluginTiddler>>/></$link>"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/OverriddenShadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/OverriddenShadow/Hint",
            "text": "It is overridden by an ordinary tiddler"
        },
        "$:/language/TiddlerInfo/Fields/Caption": {
            "title": "$:/language/TiddlerInfo/Fields/Caption",
            "text": "Fields"
        },
        "$:/language/TiddlerInfo/List/Caption": {
            "title": "$:/language/TiddlerInfo/List/Caption",
            "text": "List"
        },
        "$:/language/TiddlerInfo/List/Empty": {
            "title": "$:/language/TiddlerInfo/List/Empty",
            "text": "This tiddler does not have a list"
        },
        "$:/language/TiddlerInfo/Listed/Caption": {
            "title": "$:/language/TiddlerInfo/Listed/Caption",
            "text": "Listed"
        },
        "$:/language/TiddlerInfo/Listed/Empty": {
            "title": "$:/language/TiddlerInfo/Listed/Empty",
            "text": "This tiddler is not listed by any others"
        },
        "$:/language/TiddlerInfo/References/Caption": {
            "title": "$:/language/TiddlerInfo/References/Caption",
            "text": "References"
        },
        "$:/language/TiddlerInfo/References/Empty": {
            "title": "$:/language/TiddlerInfo/References/Empty",
            "text": "No tiddlers link to this one"
        },
        "$:/language/TiddlerInfo/Tagging/Caption": {
            "title": "$:/language/TiddlerInfo/Tagging/Caption",
            "text": "Tagging"
        },
        "$:/language/TiddlerInfo/Tagging/Empty": {
            "title": "$:/language/TiddlerInfo/Tagging/Empty",
            "text": "No tiddlers are tagged with this one"
        },
        "$:/language/TiddlerInfo/Tools/Caption": {
            "title": "$:/language/TiddlerInfo/Tools/Caption",
            "text": "Tools"
        },
        "$:/language/Docs/Types/application/javascript": {
            "title": "$:/language/Docs/Types/application/javascript",
            "description": "JavaScript code",
            "name": "application/javascript",
            "group": "Developer"
        },
        "$:/language/Docs/Types/application/json": {
            "title": "$:/language/Docs/Types/application/json",
            "description": "JSON data",
            "name": "application/json",
            "group": "Developer"
        },
        "$:/language/Docs/Types/application/x-tiddler-dictionary": {
            "title": "$:/language/Docs/Types/application/x-tiddler-dictionary",
            "description": "Data dictionary",
            "name": "application/x-tiddler-dictionary",
            "group": "Developer"
        },
        "$:/language/Docs/Types/image/gif": {
            "title": "$:/language/Docs/Types/image/gif",
            "description": "GIF image",
            "name": "image/gif",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/jpeg": {
            "title": "$:/language/Docs/Types/image/jpeg",
            "description": "JPEG image",
            "name": "image/jpeg",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/png": {
            "title": "$:/language/Docs/Types/image/png",
            "description": "PNG image",
            "name": "image/png",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/svg+xml": {
            "title": "$:/language/Docs/Types/image/svg+xml",
            "description": "Structured Vector Graphics image",
            "name": "image/svg+xml",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/x-icon": {
            "title": "$:/language/Docs/Types/image/x-icon",
            "description": "ICO format icon file",
            "name": "image/x-icon",
            "group": "Image"
        },
        "$:/language/Docs/Types/text/css": {
            "title": "$:/language/Docs/Types/text/css",
            "description": "Static stylesheet",
            "name": "text/css",
            "group": "Developer"
        },
        "$:/language/Docs/Types/text/html": {
            "title": "$:/language/Docs/Types/text/html",
            "description": "HTML markup",
            "name": "text/html",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/plain": {
            "title": "$:/language/Docs/Types/text/plain",
            "description": "Plain text",
            "name": "text/plain",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/vnd.tiddlywiki": {
            "title": "$:/language/Docs/Types/text/vnd.tiddlywiki",
            "description": "TiddlyWiki 5",
            "name": "text/vnd.tiddlywiki",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/x-tiddlywiki": {
            "title": "$:/language/Docs/Types/text/x-tiddlywiki",
            "description": "TiddlyWiki Classic",
            "name": "text/x-tiddlywiki",
            "group": "Text"
        },
        "$:/languages/en-GB": {
            "title": "$:/languages/en-GB",
            "name": "en-GB",
            "description": "English (British)",
            "author": "JeremyRuston",
            "core-version": ">=5.0.0\"",
            "text": "Stub pseudo-plugin for the default language"
        },
        "$:/core/modules/commander.js": {
            "text": "/*\\\ntitle: $:/core/modules/commander.js\ntype: application/javascript\nmodule-type: global\n\nThe $tw.Commander class is a command interpreter\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nParse a sequence of commands\n\tcommandTokens: an array of command string tokens\n\twiki: reference to the wiki store object\n\tstreams: {output:, error:}, each of which has a write(string) method\n\tcallback: a callback invoked as callback(err) where err is null if there was no error\n*/\nvar Commander = function(commandTokens,callback,wiki,streams) {\n\tvar path = require(\"path\");\n\tthis.commandTokens = commandTokens;\n\tthis.nextToken = 0;\n\tthis.callback = callback;\n\tthis.wiki = wiki;\n\tthis.streams = streams;\n\tthis.outputPath = path.resolve($tw.boot.wikiPath,$tw.config.wikiOutputSubDir);\n};\n\n/*\nAdd a string of tokens to the command queue\n*/\nCommander.prototype.addCommandTokens = function(commandTokens) {\n\tvar params = commandTokens.slice(0);\n\tparams.unshift(0);\n\tparams.unshift(this.nextToken);\n\tArray.prototype.splice.apply(this.commandTokens,params);\n};\n\n/*\nExecute the sequence of commands and invoke a callback on completion\n*/\nCommander.prototype.execute = function() {\n\tthis.executeNextCommand();\n};\n\n/*\nExecute the next command in the sequence\n*/\nCommander.prototype.executeNextCommand = function() {\n\tvar self = this;\n\t// Invoke the callback if there are no more commands\n\tif(this.nextToken >= this.commandTokens.length) {\n\t\tthis.callback(null);\n\t} else {\n\t\t// Get and check the command token\n\t\tvar commandName = this.commandTokens[this.nextToken++];\n\t\tif(commandName.substr(0,2) !== \"--\") {\n\t\t\tthis.callback(\"Missing command: \" + commandName);\n\t\t} else {\n\t\t\tcommandName = commandName.substr(2); // Trim off the --\n\t\t\t// Accumulate the parameters to the command\n\t\t\tvar params = [];\n\t\t\twhile(this.nextToken < this.commandTokens.length && \n\t\t\t\tthis.commandTokens[this.nextToken].substr(0,2) !== \"--\") {\n\t\t\t\tparams.push(this.commandTokens[this.nextToken++]);\n\t\t\t}\n\t\t\t// Get the command info\n\t\t\tvar command = $tw.commands[commandName],\n\t\t\t\tc,err;\n\t\t\tif(!command) {\n\t\t\t\tthis.callback(\"Unknown command: \" + commandName);\n\t\t\t} else {\n\t\t\t\tif(this.verbose) {\n\t\t\t\t\tthis.streams.output.write(\"Executing command: \" + commandName + \" \" + params.join(\" \") + \"\\n\");\n\t\t\t\t}\n\t\t\t\tif(command.info.synchronous) {\n\t\t\t\t\t// Synchronous command\n\t\t\t\t\tc = new command.Command(params,this);\n\t\t\t\t\terr = c.execute();\n\t\t\t\t\tif(err) {\n\t\t\t\t\t\tthis.callback(err);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis.executeNextCommand();\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Asynchronous command\n\t\t\t\t\tc = new command.Command(params,this,function(err) {\n\t\t\t\t\t\tif(err) {\n\t\t\t\t\t\t\tself.callback(err);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tself.executeNextCommand();\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\terr = c.execute();\n\t\t\t\t\tif(err) {\n\t\t\t\t\t\tthis.callback(err);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n};\n\nCommander.initCommands = function(moduleType) {\n\tmoduleType = moduleType || \"command\";\n\t$tw.commands = {};\n\t$tw.modules.forEachModuleOfType(moduleType,function(title,module) {\n\t\tvar c = $tw.commands[module.info.name] = {};\n\t\t// Add the methods defined by the module\n\t\tfor(var f in module) {\n\t\t\tif($tw.utils.hop(module,f)) {\n\t\t\t\tc[f] = 
module[f];\n\t\t\t}\n\t\t}\n\t});\n};\n\nexports.Commander = Commander;\n\n})();\n",
            "title": "$:/core/modules/commander.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/commands/build.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/build.js\ntype: application/javascript\nmodule-type: command\n\nCommand to build a build target\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"build\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\t// Get the build targets defined in the wiki\n\tvar buildTargets = $tw.boot.wikiInfo.build;\n\tif(!buildTargets) {\n\t\treturn \"No build targets defined\";\n\t}\n\t// Loop through each of the specified targets\n\tvar targets;\n\tif(this.params.length > 0) {\n\t\ttargets = this.params;\n\t} else {\n\t\ttargets = Object.keys(buildTargets);\n\t}\n\tfor(var targetIndex=0; targetIndex<targets.length; targetIndex++) {\n\t\tvar target = targets[targetIndex],\n\t\t\tcommands = buildTargets[target];\n\t\tif(!commands) {\n\t\t\treturn \"Build target '\" + target + \"' not found\";\n\t\t}\n\t\t// Add the commands to the queue\n\t\tthis.commander.addCommandTokens(commands);\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/build.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/clearpassword.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/clearpassword.js\ntype: application/javascript\nmodule-type: command\n\nClear password for crypto operations\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"clearpassword\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\t$tw.crypto.setPassword(null);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/clearpassword.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/editions.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/editions.js\ntype: application/javascript\nmodule-type: command\n\nCommand to list the available editions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"editions\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar self = this;\n\t// Output the list\n\tthis.commander.streams.output.write(\"Available editions:\\n\\n\");\n\tvar editionInfo = $tw.utils.getEditionInfo();\n\t$tw.utils.each(editionInfo,function(info,name) {\n\t\tself.commander.streams.output.write(\"    \" + name + \": \" + info.description + \"\\n\");\n\t});\n\tthis.commander.streams.output.write(\"\\n\");\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/editions.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/help.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/help.js\ntype: application/javascript\nmodule-type: command\n\nHelp command\n\n\\*/\n(function(){\n\n/*jshint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"help\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar subhelp = this.params[0] || \"default\",\n\t\thelpBase = \"$:/language/Help/\",\n\t\ttext;\n\tif(!this.commander.wiki.getTiddler(helpBase + subhelp)) {\n\t\tsubhelp = \"notfound\";\n\t}\n\t// Wikify the help as formatted text (ie block elements generate newlines)\n\ttext = this.commander.wiki.renderTiddler(\"text/plain-formatted\",helpBase + subhelp);\n\t// Remove any leading linebreaks\n\ttext = text.replace(/^(\\r?\\n)*/g,\"\");\n\tthis.commander.streams.output.write(text);\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/help.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/init.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/init.js\ntype: application/javascript\nmodule-type: command\n\nCommand to initialise an empty wiki folder\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"init\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar fs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\t// Check that we don't already have a valid wiki folder\n\tif($tw.boot.wikiTiddlersPath || ($tw.utils.isDirectory($tw.boot.wikiPath) && !$tw.utils.isDirectoryEmpty($tw.boot.wikiPath))) {\n\t\treturn \"Wiki folder is not empty\";\n\t}\n\t// Loop through each of the specified editions\n\tvar editions = this.params.length > 0 ? this.params : [\"empty\"];\n\tfor(var editionIndex=0; editionIndex<editions.length; editionIndex++) {\n\t\tvar editionName = editions[editionIndex];\n\t\t// Check the edition exists\n\t\tvar editionPath = $tw.findLibraryItem(editionName,$tw.getLibraryItemSearchPaths($tw.config.editionsPath,$tw.config.editionsEnvVar));\n\t\tif(!$tw.utils.isDirectory(editionPath)) {\n\t\t\treturn \"Edition '\" + editionName + \"' not found\";\n\t\t}\n\t\t// Copy the edition content\n\t\tvar err = $tw.utils.copyDirectory(editionPath,$tw.boot.wikiPath);\n\t\tif(!err) {\n\t\t\tthis.commander.streams.output.write(\"Copied edition '\" + editionName + \"' to \" + $tw.boot.wikiPath + \"\\n\");\n\t\t} else {\n\t\t\treturn err;\n\t\t}\n\t}\n\t// Tweak the tiddlywiki.info to remove any included wikis\n\tvar packagePath = $tw.boot.wikiPath + \"/tiddlywiki.info\",\n\t\tpackageJson = JSON.parse(fs.readFileSync(packagePath));\n\tdelete packageJson.includeWikis;\n\tfs.writeFileSync(packagePath,JSON.stringify(packageJson,null,$tw.config.preferences.jsonSpaces));\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/init.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/load.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/load.js\ntype: application/javascript\nmodule-type: command\n\nCommand to load tiddlers from a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"load\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\tif(this.params.length < 1) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar ext = path.extname(self.params[0]);\n\tfs.readFile(this.params[0],$tw.utils.getTypeEncoding(ext),function(err,data) {\n\t\tif (err) {\n\t\t\tself.callback(err);\n\t\t} else {\n\t\t\tvar fields = {title: self.params[0]},\n\t\t\t\ttype = path.extname(self.params[0]);\n\t\t\tvar tiddlers = self.commander.wiki.deserializeTiddlers(type,data,fields);\n\t\t\tif(!tiddlers) {\n\t\t\t\tself.callback(\"No tiddlers found in file \\\"\" + self.params[0] + \"\\\"\");\n\t\t\t} else {\n\t\t\t\tfor(var t=0; t<tiddlers.length; t++) {\n\t\t\t\t\tself.commander.wiki.importTiddler(new $tw.Tiddler(tiddlers[t]));\n\t\t\t\t}\n\t\t\t\tself.callback(null);\t\n\t\t\t}\n\t\t}\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/load.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/makelibrary.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/makelibrary.js\ntype: application/javascript\nmodule-type: command\n\nCommand to pack all of the plugins in the library into a plugin tiddler of type \"library\"\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"makelibrary\",\n\tsynchronous: true\n};\n\nvar UPGRADE_LIBRARY_TITLE = \"$:/UpgradeLibrary\";\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar wiki = this.commander.wiki,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\tupgradeLibraryTitle = this.params[0] || UPGRADE_LIBRARY_TITLE,\n\t\ttiddlers = {};\n\t// Collect up the library plugins\n\tvar collectPlugins = function(folder) {\n\t\t\tvar pluginFolders = fs.readdirSync(folder);\n\t\t\tfor(var p=0; p<pluginFolders.length; p++) {\n\t\t\t\tif(!$tw.boot.excludeRegExp.test(pluginFolders[p])) {\n\t\t\t\t\tpluginFields = $tw.loadPluginFolder(path.resolve(folder,\"./\" + pluginFolders[p]));\n\t\t\t\t\tif(pluginFields && pluginFields.title) {\n\t\t\t\t\t\ttiddlers[pluginFields.title] = pluginFields;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tcollectPublisherPlugins = function(folder) {\n\t\t\tvar publisherFolders = fs.readdirSync(folder);\n\t\t\tfor(var t=0; t<publisherFolders.length; t++) {\n\t\t\t\tif(!$tw.boot.excludeRegExp.test(publisherFolders[t])) {\n\t\t\t\t\tcollectPlugins(path.resolve(folder,\"./\" + publisherFolders[t]));\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\tcollectPublisherPlugins(path.resolve($tw.boot.corePath,$tw.config.pluginsPath));\n\tcollectPublisherPlugins(path.resolve($tw.boot.corePath,$tw.config.themesPath));\n\tcollectPlugins(path.resolve($tw.boot.corePath,$tw.config.languagesPath));\n\t// Save the upgrade library tiddler\n\tvar pluginFields = {\n\t\ttitle: upgradeLibraryTitle,\n\t\ttype: \"application/json\",\n\t\t\"plugin-type\": \"library\",\n\t\t\"text\": JSON.stringify({tiddlers: tiddlers},null,$tw.config.preferences.jsonSpaces)\n\t};\n\twiki.addTiddler(new $tw.Tiddler(pluginFields));\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/makelibrary.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/output.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/output.js\ntype: application/javascript\nmodule-type: command\n\nCommand to set the default output location (defaults to current working directory)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"output\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar fs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\tif(this.params.length < 1) {\n\t\treturn \"Missing output path\";\n\t}\n\tthis.commander.outputPath = path.resolve(process.cwd(),this.params[0]);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/output.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/password.js\ntype: application/javascript\nmodule-type: command\n\nSave password for crypto operations\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"password\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing password\";\n\t}\n\t$tw.crypto.setPassword(this.params[0]);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/password.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/rendertiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/rendertiddler.js\ntype: application/javascript\nmodule-type: command\n\nCommand to render a tiddler and save it to a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"rendertiddler\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\ttitle = this.params[0],\n\t\tfilename = path.resolve(this.commander.outputPath,this.params[1]),\n\t\ttype = this.params[2] || \"text/html\",\n\t\ttemplate = this.params[3],\n\t\tvariables = {};\n\t$tw.utils.createFileDirectories(filename);\n\tif(template) {\n\t\tvariables.currentTiddler = title;\n\t\ttitle = template;\n\t}\n\tfs.writeFile(filename,this.commander.wiki.renderTiddler(type,title,{variables: variables}),\"utf8\",function(err) {\n\t\tself.callback(err);\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/rendertiddler.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/rendertiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/rendertiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to render several tiddlers to a folder of files\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"rendertiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\ttemplate = this.params[1],\n\t\toutputPath = this.commander.outputPath,\n\t\tpathname = path.resolve(outputPath,this.params[2]),\t\t\n\t\ttype = this.params[3] || \"text/html\",\n\t\textension = this.params[4] || \".html\",\n\t\tdeleteDirectory = (this.params[5] || \"\").toLowerCase() !== \"noclean\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\tif(deleteDirectory) {\n\t\t$tw.utils.deleteDirectory(pathname);\n\t}\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar parser = wiki.parseTiddler(template),\n\t\t\twidgetNode = wiki.makeWidget(parser,{variables: {currentTiddler: title}}),\n\t\t\tcontainer = $tw.fakeDocument.createElement(\"div\");\n\t\twidgetNode.render(container,null);\n\t\tvar text = type === \"text/html\" ? container.innerHTML : container.textContent,\n\t\t\texportPath = null;\n\t\tif($tw.utils.hop($tw.macros,\"tv-get-export-path\")) {\n\t\t\tvar macroPath = $tw.macros[\"tv-get-export-path\"].run.apply(self,[title]);\n\t\t\tif(macroPath) {\n\t\t\t\texportPath = path.resolve(outputPath,macroPath + extension);\n\t\t\t}\n\t\t}\n\t\tvar finalPath = exportPath || path.resolve(pathname,encodeURIComponent(title) + extension);\n\t\t$tw.utils.createFileDirectories(finalPath);\n\t\tfs.writeFileSync(finalPath,text,\"utf8\");\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/rendertiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savelibrarytiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savelibrarytiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save the subtiddlers of a bundle tiddler as a series of JSON files\n\n--savelibrarytiddlers <tiddler> <pathname> <skinnylisting>\n\nThe tiddler identifies the bundle tiddler that contains the subtiddlers.\n\nThe pathname specifies the pathname to the folder in which the JSON files should be saved. The filename is the URL encoded title of the subtiddler.\n\nThe skinnylisting specifies the title of the tiddler to which a JSON catalogue of the subtiddlers will be saved. The JSON file contains the same data as the bundle tiddler but with the `text` field removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"savelibrarytiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\tcontainerTitle = this.params[0],\n\t\tfilter = this.params[1],\n\t\tbasepath = this.params[2],\n\t\tskinnyListTitle = this.params[3];\n\t// Get the container tiddler as data\n\tvar containerData = self.commander.wiki.getTiddlerDataCached(containerTitle,undefined);\n\tif(!containerData) {\n\t\treturn \"'\" + containerTitle + \"' is not a tiddler bundle\";\n\t}\n\t// Filter the list of plugins\n\tvar pluginList = [];\n\t$tw.utils.each(containerData.tiddlers,function(tiddler,title) {\n\t\tpluginList.push(title);\n\t});\n\tvar filteredPluginList;\n\tif(filter) {\n\t\tfilteredPluginList = self.commander.wiki.filterTiddlers(filter,null,self.commander.wiki.makeTiddlerIterator(pluginList));\n\t} else {\n\t\tfilteredPluginList = pluginList;\n\t}\n\t// Iterate through the plugins\n\tvar skinnyList = [];\n\t$tw.utils.each(filteredPluginList,function(title) {\n\t\tvar tiddler = containerData.tiddlers[title];\n\t\t// Save each JSON file and collect the skinny data\n\t\tvar pathname = path.resolve(self.commander.outputPath,basepath + encodeURIComponent(title) + \".json\");\n\t\t$tw.utils.createFileDirectories(pathname);\n\t\tfs.writeFileSync(pathname,JSON.stringify(tiddler,null,$tw.config.preferences.jsonSpaces),\"utf8\");\n\t\t// Collect the skinny list data\n\t\tvar pluginTiddlers = JSON.parse(tiddler.text),\n\t\t\treadmeContent = (pluginTiddlers.tiddlers[title + \"/readme\"] || {}).text,\n\t\t\ticonTiddler = pluginTiddlers.tiddlers[title + \"/icon\"] || {},\n\t\t\ticonType = iconTiddler.type,\n\t\t\ticonText = iconTiddler.text,\n\t\t\ticonContent;\n\t\tif(iconType && iconText) {\n\t\t\ticonContent = $tw.utils.makeDataUri(iconText,iconType);\n\t\t}\n\t\tskinnyList.push($tw.utils.extend({},tiddler,{text: undefined, readme: readmeContent, icon: iconContent}));\n\t});\n\t// Save the catalogue tiddler\n\tif(skinnyListTitle) {\n\t\tself.commander.wiki.setTiddlerData(skinnyListTitle,skinnyList);\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savelibrarytiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savetiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savetiddler.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save the content of a tiddler to a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"savetiddler\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\ttitle = this.params[0],\n\t\tfilename = path.resolve(this.commander.outputPath,this.params[1]),\n\t\ttiddler = this.commander.wiki.getTiddler(title);\n\tif(tiddler) {\n\t\tvar type = tiddler.fields.type || \"text/vnd.tiddlywiki\",\n\t\t\tcontentTypeInfo = $tw.config.contentTypeInfo[type] || {encoding: \"utf8\"};\n\t\t$tw.utils.createFileDirectories(filename);\n\t\tfs.writeFile(filename,tiddler.fields.text,contentTypeInfo.encoding,function(err) {\n\t\t\tself.callback(err);\n\t\t});\n\t} else {\n\t\treturn \"Missing tiddler: \" + title;\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savetiddler.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savetiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savetiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save several tiddlers to a folder of files\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"savetiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\tpathname = path.resolve(this.commander.outputPath,this.params[1]),\n\t\tdeleteDirectory = (this.params[2] || \"\").toLowerCase() !== \"noclean\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\tif(deleteDirectory) {\n\t\t$tw.utils.deleteDirectory(pathname);\n\t}\n\t$tw.utils.createDirectory(pathname);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.commander.wiki.getTiddler(title),\n\t\t\ttype = tiddler.fields.type || \"text/vnd.tiddlywiki\",\n\t\t\tcontentTypeInfo = $tw.config.contentTypeInfo[type] || {encoding: \"utf8\"},\n\t\t\tfilename = path.resolve(pathname,encodeURIComponent(title));\n\t\tfs.writeFileSync(filename,tiddler.fields.text,contentTypeInfo.encoding);\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savetiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/server.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/server.js\ntype: application/javascript\nmodule-type: command\n\nServe tiddlers over http\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nif($tw.node) {\n\tvar util = require(\"util\"),\n\t\tfs = require(\"fs\"),\n\t\turl = require(\"url\"),\n\t\tpath = require(\"path\"),\n\t\thttp = require(\"http\");\n}\n\nexports.info = {\n\tname: \"server\",\n\tsynchronous: true\n};\n\n/*\nA simple HTTP server with regexp-based routes\n*/\nfunction SimpleServer(options) {\n\tthis.routes = options.routes || [];\n\tthis.wiki = options.wiki;\n\tthis.variables = options.variables || {};\n}\n\nSimpleServer.prototype.set = function(obj) {\n\tvar self = this;\n\t$tw.utils.each(obj,function(value,name) {\n\t\tself.variables[name] = value;\n\t});\n};\n\nSimpleServer.prototype.get = function(name) {\n\treturn this.variables[name];\n};\n\nSimpleServer.prototype.addRoute = function(route) {\n\tthis.routes.push(route);\n};\n\nSimpleServer.prototype.findMatchingRoute = function(request,state) {\n\tvar pathprefix = this.get(\"pathprefix\") || \"\";\n\tfor(var t=0; t<this.routes.length; t++) {\n\t\tvar potentialRoute = this.routes[t],\n\t\t\tpathRegExp = potentialRoute.path,\n\t\t\tpathname = state.urlInfo.pathname,\n\t\t\tmatch;\n\t\tif(pathprefix) {\n\t\t\tif(pathname.substr(0,pathprefix.length) === pathprefix) {\n\t\t\t\tpathname = pathname.substr(pathprefix.length);\n\t\t\t\tmatch = potentialRoute.path.exec(pathname);\n\t\t\t} else {\n\t\t\t\tmatch = false;\n\t\t\t}\n\t\t} else {\n\t\t\tmatch = potentialRoute.path.exec(pathname);\n\t\t}\n\t\tif(match && request.method === potentialRoute.method) {\n\t\t\tstate.params = [];\n\t\t\tfor(var p=1; p<match.length; p++) {\n\t\t\t\tstate.params.push(match[p]);\n\t\t\t}\n\t\t\treturn potentialRoute;\n\t\t}\n\t}\n\treturn null;\n};\n\nSimpleServer.prototype.checkCredentials = function(request,incomingUsername,incomingPassword) {\n\tvar header = request.headers.authorization || \"\",\n\t\ttoken = header.split(/\\s+/).pop() || \"\",\n\t\tauth = $tw.utils.base64Decode(token),\n\t\tparts = auth.split(/:/),\n\t\tusername = parts[0],\n\t\tpassword = parts[1];\n\tif(incomingUsername === username && incomingPassword === password) {\n\t\treturn \"ALLOWED\";\n\t} else {\n\t\treturn \"DENIED\";\n\t}\n};\n\nSimpleServer.prototype.listen = function(port,host) {\n\tvar self = this;\n\thttp.createServer(function(request,response) {\n\t\t// Compose the state object\n\t\tvar state = {};\n\t\tstate.wiki = self.wiki;\n\t\tstate.server = self;\n\t\tstate.urlInfo = url.parse(request.url);\n\t\t// Find the route that matches this path\n\t\tvar route = self.findMatchingRoute(request,state);\n\t\t// Check for the username and password if we've got one\n\t\tvar username = self.get(\"username\"),\n\t\t\tpassword = self.get(\"password\");\n\t\tif(username && password) {\n\t\t\t// Check they match\n\t\t\tif(self.checkCredentials(request,username,password) !== \"ALLOWED\") {\n\t\t\t\tvar servername = state.wiki.getTiddlerText(\"$:/SiteTitle\") || \"TiddlyWiki5\";\n\t\t\t\tresponse.writeHead(401,\"Authentication required\",{\n\t\t\t\t\t\"WWW-Authenticate\": 'Basic realm=\"Please provide your username and password to login to ' + servername + '\"'\n\t\t\t\t});\n\t\t\t\tresponse.end();\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t// Return a 404 if we didn't find a route\n\t\tif(!route) {\n\t\t\tresponse.writeHead(404);\n\t\t\tresponse.end();\n\t\t\treturn;\n\t\t}\n\t\t// Set the encoding for the 
incoming request\n\t\t// TODO: Presumably this would need tweaking if we supported PUTting binary tiddlers\n\t\trequest.setEncoding(\"utf8\");\n\t\t// Dispatch the appropriate method\n\t\tswitch(request.method) {\n\t\t\tcase \"GET\": // Intentional fall-through\n\t\t\tcase \"DELETE\":\n\t\t\t\troute.handler(request,response,state);\n\t\t\t\tbreak;\n\t\t\tcase \"PUT\":\n\t\t\t\tvar data = \"\";\n\t\t\t\trequest.on(\"data\",function(chunk) {\n\t\t\t\t\tdata += chunk.toString();\n\t\t\t\t});\n\t\t\t\trequest.on(\"end\",function() {\n\t\t\t\t\tstate.data = data;\n\t\t\t\t\troute.handler(request,response,state);\n\t\t\t\t});\n\t\t\t\tbreak;\n\t\t}\n\t}).listen(port,host);\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n\t// Set up server\n\tthis.server = new SimpleServer({\n\t\twiki: this.commander.wiki\n\t});\n\t// Add route handlers\n\tthis.server.addRoute({\n\t\tmethod: \"PUT\",\n\t\tpath: /^\\/recipes\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]),\n\t\t\t\tfields = JSON.parse(state.data);\n\t\t\t// Pull up any subfields in the `fields` object\n\t\t\tif(fields.fields) {\n\t\t\t\t$tw.utils.each(fields.fields,function(field,name) {\n\t\t\t\t\tfields[name] = field;\n\t\t\t\t});\n\t\t\t\tdelete fields.fields;\n\t\t\t}\n\t\t\t// Remove any revision field\n\t\t\tif(fields.revision) {\n\t\t\t\tdelete fields.revision;\n\t\t\t}\n\t\t\tstate.wiki.addTiddler(new $tw.Tiddler(state.wiki.getCreationFields(),fields,{title: title},state.wiki.getModificationFields()));\n\t\t\tvar changeCount = state.wiki.getChangeCount(title).toString();\n\t\t\tresponse.writeHead(204, \"OK\",{\n\t\t\t\tEtag: \"\\\"default/\" + encodeURIComponent(title) + \"/\" + changeCount + \":\\\"\",\n\t\t\t\t\"Content-Type\": \"text/plain\"\n\t\t\t});\n\t\t\tresponse.end();\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"DELETE\",\n\t\tpath: /^\\/bags\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]);\n\t\t\tstate.wiki.deleteTiddler(title);\n\t\t\tresponse.writeHead(204, \"OK\", {\n\t\t\t\t\"Content-Type\": \"text/plain\"\n\t\t\t});\n\t\t\tresponse.end();\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": state.server.get(\"serveType\")});\n\t\t\tvar text = state.wiki.renderTiddler(state.server.get(\"renderType\"),state.server.get(\"rootTiddler\"));\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/status$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\tvar text = JSON.stringify({\n\t\t\t\tusername: state.server.get(\"username\"),\n\t\t\t\tspace: {\n\t\t\t\t\trecipe: \"default\"\n\t\t\t\t},\n\t\t\t\ttiddlywiki_version: $tw.version\n\t\t\t});\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/favicon.ico$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"image/x-icon\"});\n\t\t\tvar buffer = state.wiki.getTiddlerText(\"$:/favicon.ico\",\"\");\n\t\t\tresponse.end(buffer,\"base64\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: 
/^\\/recipes\\/default\\/tiddlers.json$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\tvar tiddlers = [];\n\t\t\tstate.wiki.forEachTiddler({sortField: \"title\"},function(title,tiddler) {\n\t\t\t\tvar tiddlerFields = {};\n\t\t\t\t$tw.utils.each(tiddler.fields,function(field,name) {\n\t\t\t\t\tif(name !== \"text\") {\n\t\t\t\t\t\ttiddlerFields[name] = tiddler.getFieldString(name);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\ttiddlerFields.revision = state.wiki.getChangeCount(title);\n\t\t\t\ttiddlerFields.type = tiddlerFields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\ttiddlers.push(tiddlerFields);\n\t\t\t});\n\t\t\tvar text = JSON.stringify(tiddlers);\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/recipes\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]),\n\t\t\t\ttiddler = state.wiki.getTiddler(title),\n\t\t\t\ttiddlerFields = {},\n\t\t\t\tknownFields = [\n\t\t\t\t\t\"bag\", \"created\", \"creator\", \"modified\", \"modifier\", \"permissions\", \"recipe\", \"revision\", \"tags\", \"text\", \"title\", \"type\", \"uri\"\n\t\t\t\t];\n\t\t\tif(tiddler) {\n\t\t\t\t$tw.utils.each(tiddler.fields,function(field,name) {\n\t\t\t\t\tvar value = tiddler.getFieldString(name);\n\t\t\t\t\tif(knownFields.indexOf(name) !== -1) {\n\t\t\t\t\t\ttiddlerFields[name] = value;\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttiddlerFields.fields = tiddlerFields.fields || {};\n\t\t\t\t\t\ttiddlerFields.fields[name] = value;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\ttiddlerFields.revision = state.wiki.getChangeCount(title);\n\t\t\t\ttiddlerFields.type = tiddlerFields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\t\tresponse.end(JSON.stringify(tiddlerFields),\"utf8\");\n\t\t\t} else {\n\t\t\t\tresponse.writeHead(404);\n\t\t\t\tresponse.end();\n\t\t\t}\n\t\t}\n\t});\n};\n\nCommand.prototype.execute = function() {\n\tif(!$tw.boot.wikiTiddlersPath) {\n\t\t$tw.utils.warning(\"Warning: Wiki folder '\" + $tw.boot.wikiPath + \"' does not exist or is missing a tiddlywiki.info file\");\n\t}\n\tvar port = this.params[0] || \"8080\",\n\t\trootTiddler = this.params[1] || \"$:/core/save/all\",\n\t\trenderType = this.params[2] || \"text/plain\",\n\t\tserveType = this.params[3] || \"text/html\",\n\t\tusername = this.params[4],\n\t\tpassword = this.params[5],\n\t\thost = this.params[6] || \"127.0.0.1\",\n\t\tpathprefix = this.params[7];\n\tthis.server.set({\n\t\trootTiddler: rootTiddler,\n\t\trenderType: renderType,\n\t\tserveType: serveType,\n\t\tusername: username,\n\t\tpassword: password,\n\t\tpathprefix: pathprefix\n\t});\n\tthis.server.listen(port,host);\n\tconsole.log(\"Serving on \" + host + \":\" + port);\n\tconsole.log(\"(press ctrl-C to exit)\");\n\t// Warn if required plugins are missing\n\tif(!$tw.wiki.getTiddler(\"$:/plugins/tiddlywiki/tiddlyweb\") || !$tw.wiki.getTiddler(\"$:/plugins/tiddlywiki/filesystem\")) {\n\t\t$tw.utils.warning(\"Warning: Plugins required for client-server operation (\\\"tiddlywiki/filesystem\\\" and \\\"tiddlywiki/tiddlyweb\\\") are missing from tiddlywiki.info file\");\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/server.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/setfield.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/setfield.js\ntype: application/javascript\nmodule-type: command\n\nCommand to modify selected tiddlers to set a field to the text of a template tiddler that has been wikified with the selected tiddler as the current tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"setfield\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 4) {\n\t\treturn \"Missing parameters\";\n\t}\n\tvar self = this,\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\tfieldname = this.params[1] || \"text\",\n\t\ttemplatetitle = this.params[2],\n\t\trendertype = this.params[3] || \"text/plain\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar parser = wiki.parseTiddler(templatetitle),\n\t\t\tnewFields = {},\n\t\t\ttiddler = wiki.getTiddler(title);\n\t\tif(parser) {\n\t\t\tvar widgetNode = wiki.makeWidget(parser,{variables: {currentTiddler: title}});\n\t\t\tvar container = $tw.fakeDocument.createElement(\"div\");\n\t\t\twidgetNode.render(container,null);\n\t\t\tnewFields[fieldname] = rendertype === \"text/html\" ? container.innerHTML : container.textContent;\n\t\t} else {\n\t\t\tnewFields[fieldname] = undefined;\n\t\t}\n\t\twiki.addTiddler(new $tw.Tiddler(tiddler,newFields));\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/setfield.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/unpackplugin.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/unpackplugin.js\ntype: application/javascript\nmodule-type: command\n\nCommand to extract the shadow tiddlers from within a plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"unpackplugin\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing plugin name\";\n\t}\n\tvar self = this,\n\t\ttitle = this.params[0],\n\t\tpluginData = this.commander.wiki.getTiddlerDataCached(title);\n\tif(!pluginData) {\n\t\treturn \"Plugin '\" + title + \"' not found\";\n\t}\n\t$tw.utils.each(pluginData.tiddlers,function(tiddler) {\n\t\tself.commander.wiki.addTiddler(new $tw.Tiddler(tiddler));\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/unpackplugin.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/verbose.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/verbose.js\ntype: application/javascript\nmodule-type: command\n\nVerbose command\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"verbose\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tthis.commander.verbose = true;\n\t// Output the boot message log\n\tthis.commander.streams.output.write(\"Boot log:\\n  \" + $tw.boot.logMessages.join(\"\\n  \") + \"\\n\");\n\treturn null; // No error\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/verbose.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/version.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/version.js\ntype: application/javascript\nmodule-type: command\n\nVersion command\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"version\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tthis.commander.streams.output.write($tw.version + \"\\n\");\n\treturn null; // No error\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/version.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/config.js": {
            "text": "/*\\\ntitle: $:/core/modules/config.js\ntype: application/javascript\nmodule-type: config\n\nCore configuration constants\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.preferences = {};\n\nexports.preferences.notificationDuration = 3 * 1000;\nexports.preferences.jsonSpaces = 4;\n\nexports.textPrimitives = {\n\tupperLetter: \"[A-Z\\u00c0-\\u00d6\\u00d8-\\u00de\\u0150\\u0170]\",\n\tlowerLetter: \"[a-z\\u00df-\\u00f6\\u00f8-\\u00ff\\u0151\\u0171]\",\n\tanyLetter:   \"[A-Za-z0-9\\u00c0-\\u00d6\\u00d8-\\u00de\\u00df-\\u00f6\\u00f8-\\u00ff\\u0150\\u0170\\u0151\\u0171]\",\n\tblockPrefixLetters:\t\"[A-Za-z0-9-_\\u00c0-\\u00d6\\u00d8-\\u00de\\u00df-\\u00f6\\u00f8-\\u00ff\\u0150\\u0170\\u0151\\u0171]\"\n};\n\nexports.textPrimitives.unWikiLink = \"~\";\nexports.textPrimitives.wikiLink = exports.textPrimitives.upperLetter + \"+\" +\n\texports.textPrimitives.lowerLetter + \"+\" +\n\texports.textPrimitives.upperLetter +\n\texports.textPrimitives.anyLetter + \"*\";\n\nexports.htmlEntities = {quot:34, amp:38, apos:39, lt:60, gt:62, nbsp:160, iexcl:161, cent:162, pound:163, curren:164, yen:165, brvbar:166, sect:167, uml:168, copy:169, ordf:170, laquo:171, not:172, shy:173, reg:174, macr:175, deg:176, plusmn:177, sup2:178, sup3:179, acute:180, micro:181, para:182, middot:183, cedil:184, sup1:185, ordm:186, raquo:187, frac14:188, frac12:189, frac34:190, iquest:191, Agrave:192, Aacute:193, Acirc:194, Atilde:195, Auml:196, Aring:197, AElig:198, Ccedil:199, Egrave:200, Eacute:201, Ecirc:202, Euml:203, Igrave:204, Iacute:205, Icirc:206, Iuml:207, ETH:208, Ntilde:209, Ograve:210, Oacute:211, Ocirc:212, Otilde:213, Ouml:214, times:215, Oslash:216, Ugrave:217, Uacute:218, Ucirc:219, Uuml:220, Yacute:221, THORN:222, szlig:223, agrave:224, aacute:225, acirc:226, atilde:227, auml:228, aring:229, aelig:230, ccedil:231, egrave:232, eacute:233, ecirc:234, euml:235, igrave:236, iacute:237, icirc:238, iuml:239, eth:240, ntilde:241, ograve:242, oacute:243, ocirc:244, otilde:245, ouml:246, divide:247, oslash:248, ugrave:249, uacute:250, ucirc:251, uuml:252, yacute:253, thorn:254, yuml:255, OElig:338, oelig:339, Scaron:352, scaron:353, Yuml:376, fnof:402, circ:710, tilde:732, Alpha:913, Beta:914, Gamma:915, Delta:916, Epsilon:917, Zeta:918, Eta:919, Theta:920, Iota:921, Kappa:922, Lambda:923, Mu:924, Nu:925, Xi:926, Omicron:927, Pi:928, Rho:929, Sigma:931, Tau:932, Upsilon:933, Phi:934, Chi:935, Psi:936, Omega:937, alpha:945, beta:946, gamma:947, delta:948, epsilon:949, zeta:950, eta:951, theta:952, iota:953, kappa:954, lambda:955, mu:956, nu:957, xi:958, omicron:959, pi:960, rho:961, sigmaf:962, sigma:963, tau:964, upsilon:965, phi:966, chi:967, psi:968, omega:969, thetasym:977, upsih:978, piv:982, ensp:8194, emsp:8195, thinsp:8201, zwnj:8204, zwj:8205, lrm:8206, rlm:8207, ndash:8211, mdash:8212, lsquo:8216, rsquo:8217, sbquo:8218, ldquo:8220, rdquo:8221, bdquo:8222, dagger:8224, Dagger:8225, bull:8226, hellip:8230, permil:8240, prime:8242, Prime:8243, lsaquo:8249, rsaquo:8250, oline:8254, frasl:8260, euro:8364, image:8465, weierp:8472, real:8476, trade:8482, alefsym:8501, larr:8592, uarr:8593, rarr:8594, darr:8595, harr:8596, crarr:8629, lArr:8656, uArr:8657, rArr:8658, dArr:8659, hArr:8660, forall:8704, part:8706, exist:8707, empty:8709, nabla:8711, isin:8712, notin:8713, ni:8715, prod:8719, sum:8721, minus:8722, lowast:8727, radic:8730, prop:8733, infin:8734, ang:8736, and:8743, or:8744, cap:8745, cup:8746, int:8747, there4:8756, 
sim:8764, cong:8773, asymp:8776, ne:8800, equiv:8801, le:8804, ge:8805, sub:8834, sup:8835, nsub:8836, sube:8838, supe:8839, oplus:8853, otimes:8855, perp:8869, sdot:8901, lceil:8968, rceil:8969, lfloor:8970, rfloor:8971, lang:9001, rang:9002, loz:9674, spades:9824, clubs:9827, hearts:9829, diams:9830 };\n\nexports.htmlVoidElements = \"area,base,br,col,command,embed,hr,img,input,keygen,link,meta,param,source,track,wbr\".split(\",\");\n\nexports.htmlBlockElements = \"address,article,aside,audio,blockquote,canvas,dd,div,dl,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,hr,li,noscript,ol,output,p,pre,section,table,tfoot,ul,video\".split(\",\");\n\nexports.htmlUnsafeElements = \"script\".split(\",\");\n\n})();\n",
            "title": "$:/core/modules/config.js",
            "type": "application/javascript",
            "module-type": "config"
        },
        "$:/core/modules/deserializers.js": {
            "text": "/*\\\ntitle: $:/core/modules/deserializers.js\ntype: application/javascript\nmodule-type: tiddlerdeserializer\n\nFunctions to deserialise tiddlers from a block of text\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nUtility function to parse an old-style tiddler DIV in a *.tid file. It looks like this:\n\n<div title=\"Title\" creator=\"JoeBloggs\" modifier=\"JoeBloggs\" created=\"201102111106\" modified=\"201102111310\" tags=\"myTag [[my long tag]]\">\n<pre>The text of the tiddler (without the expected HTML encoding).\n</pre>\n</div>\n\nNote that the field attributes are HTML encoded, but that the body of the <PRE> tag is not encoded.\n\nWhen these tiddler DIVs are encountered within a TiddlyWiki HTML file then the body is encoded in the usual way.\n*/\nvar parseTiddlerDiv = function(text /* [,fields] */) {\n\t// Slot together the default results\n\tvar result = {};\n\tif(arguments.length > 1) {\n\t\tfor(var f=1; f<arguments.length; f++) {\n\t\t\tvar fields = arguments[f];\n\t\t\tfor(var t in fields) {\n\t\t\t\tresult[t] = fields[t];\t\t\n\t\t\t}\n\t\t}\n\t}\n\t// Parse the DIV body\n\tvar startRegExp = /^\\s*<div\\s+([^>]*)>(\\s*<pre>)?/gi,\n\t\tendRegExp,\n\t\tmatch = startRegExp.exec(text);\n\tif(match) {\n\t\t// Old-style DIVs don't have the <pre> tag\n\t\tif(match[2]) {\n\t\t\tendRegExp = /<\\/pre>\\s*<\\/div>\\s*$/gi;\n\t\t} else {\n\t\t\tendRegExp = /<\\/div>\\s*$/gi;\n\t\t}\n\t\tvar endMatch = endRegExp.exec(text);\n\t\tif(endMatch) {\n\t\t\t// Extract the text\n\t\t\tresult.text = text.substring(match.index + match[0].length,endMatch.index);\n\t\t\t// Process the attributes\n\t\t\tvar attrRegExp = /\\s*([^=\\s]+)\\s*=\\s*(?:\"([^\"]*)\"|'([^']*)')/gi,\n\t\t\t\tattrMatch;\n\t\t\tdo {\n\t\t\t\tattrMatch = attrRegExp.exec(match[1]);\n\t\t\t\tif(attrMatch) {\n\t\t\t\t\tvar name = attrMatch[1];\n\t\t\t\t\tvar value = attrMatch[2] !== undefined ? attrMatch[2] : attrMatch[3];\n\t\t\t\t\tresult[name] = value;\n\t\t\t\t}\n\t\t\t} while(attrMatch);\n\t\t\treturn result;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports[\"application/x-tiddler-html-div\"] = function(text,fields) {\n\treturn [parseTiddlerDiv(text,fields)];\n};\n\nexports[\"application/json\"] = function(text,fields) {\n\tvar incoming = JSON.parse(text),\n\t\tresults = [];\n\tif($tw.utils.isArray(incoming)) {\n\t\tfor(var t=0; t<incoming.length; t++) {\n\t\t\tvar incomingFields = incoming[t],\n\t\t\t\tfields = {};\n\t\t\tfor(var f in incomingFields) {\n\t\t\t\tif(typeof incomingFields[f] === \"string\") {\n\t\t\t\t\tfields[f] = incomingFields[f];\n\t\t\t\t}\n\t\t\t}\n\t\t\tresults.push(fields);\n\t\t}\n\t}\n\treturn results;\n};\n\n/*\nParse an HTML file into tiddlers. 
There are three possibilities:\n# A TiddlyWiki classic HTML file containing `text/x-tiddlywiki` tiddlers\n# A TiddlyWiki5 HTML file containing `text/vnd.tiddlywiki` tiddlers\n# An ordinary HTML file\n*/\nexports[\"text/html\"] = function(text,fields) {\n\t// Check if we've got a store area\n\tvar storeAreaMarkerRegExp = /<div id=[\"']?storeArea['\"]?( style=[\"']?display:none;[\"']?)?>/gi,\n\t\tmatch = storeAreaMarkerRegExp.exec(text);\n\tif(match) {\n\t\t// If so, it's either a classic TiddlyWiki file or an unencrypted TW5 file\n\t\t// First read the normal tiddlers\n\t\tvar results = deserializeTiddlyWikiFile(text,storeAreaMarkerRegExp.lastIndex,!!match[1],fields);\n\t\t// Then any system tiddlers\n\t\tvar systemAreaMarkerRegExp = /<div id=[\"']?systemArea['\"]?( style=[\"']?display:none;[\"']?)?>/gi,\n\t\t\tsysMatch = systemAreaMarkerRegExp.exec(text);\n\t\tif(sysMatch) {\n\t\t\tresults.push.apply(results,deserializeTiddlyWikiFile(text,systemAreaMarkerRegExp.lastIndex,!!sysMatch[1],fields));\n\t\t}\n\t\treturn results;\n\t} else {\n\t\t// Check whether we've got an encrypted file\n\t\tvar encryptedStoreArea = $tw.utils.extractEncryptedStoreArea(text);\n\t\tif(encryptedStoreArea) {\n\t\t\t// If so, attempt to decrypt it using the current password\n\t\t\treturn $tw.utils.decryptStoreArea(encryptedStoreArea);\n\t\t} else {\n\t\t\t// It's not a TiddlyWiki so we'll return the entire HTML file as a tiddler\n\t\t\treturn deserializeHtmlFile(text,fields);\n\t\t}\n\t}\n};\n\nfunction deserializeHtmlFile(text,fields) {\n\tvar result = {};\n\t$tw.utils.each(fields,function(value,name) {\n\t\tresult[name] = value;\n\t});\n\tresult.text = text;\n\tresult.type = \"text/html\";\n\treturn [result];\n}\n\nfunction deserializeTiddlyWikiFile(text,storeAreaEnd,isTiddlyWiki5,fields) {\n\tvar results = [],\n\t\tendOfDivRegExp = /(<\\/div>\\s*)/gi,\n\t\tstartPos = storeAreaEnd,\n\t\tdefaultType = isTiddlyWiki5 ? undefined : \"text/x-tiddlywiki\";\n\tendOfDivRegExp.lastIndex = startPos;\n\tvar match = endOfDivRegExp.exec(text);\n\twhile(match) {\n\t\tvar endPos = endOfDivRegExp.lastIndex,\n\t\t\ttiddlerFields = parseTiddlerDiv(text.substring(startPos,endPos),fields,{type: defaultType});\n\t\tif(!tiddlerFields) {\n\t\t\tbreak;\n\t\t}\n\t\t$tw.utils.each(tiddlerFields,function(value,name) {\n\t\t\tif(typeof value === \"string\") {\n\t\t\t\ttiddlerFields[name] = $tw.utils.htmlDecode(value);\n\t\t\t}\n\t\t});\n\t\tif(tiddlerFields.text !== null) {\n\t\t\tresults.push(tiddlerFields);\n\t\t}\n\t\tstartPos = endPos;\n\t\tmatch = endOfDivRegExp.exec(text);\n\t}\n\treturn results;\n}\n\n})();\n",
            "title": "$:/core/modules/deserializers.js",
            "type": "application/javascript",
            "module-type": "tiddlerdeserializer"
        },
        "$:/core/modules/editor/engines/framed.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/engines/framed.js\ntype: application/javascript\nmodule-type: library\n\nText editor engine based on a simple input or textarea within an iframe. This is done so that the selection is preserved even when clicking away from the textarea\n\n\\*/\n(function(){\n\n/*jslint node: true,browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HEIGHT_VALUE_TITLE = \"$:/config/TextEditor/EditorHeight/Height\";\n\nfunction FramedEngine(options) {\n\t// Save our options\n\toptions = options || {};\n\tthis.widget = options.widget;\n\tthis.value = options.value;\n\tthis.parentNode = options.parentNode;\n\tthis.nextSibling = options.nextSibling;\n\t// Create our hidden dummy text area for reading styles\n\tthis.dummyTextArea = this.widget.document.createElement(\"textarea\");\n\tif(this.widget.editClass) {\n\t\tthis.dummyTextArea.className = this.widget.editClass;\n\t}\n\tthis.dummyTextArea.setAttribute(\"hidden\",\"true\");\n\tthis.parentNode.insertBefore(this.dummyTextArea,this.nextSibling);\n\tthis.widget.domNodes.push(this.dummyTextArea);\n\t// Create the iframe\n\tthis.iframeNode = this.widget.document.createElement(\"iframe\");\n\tthis.parentNode.insertBefore(this.iframeNode,this.nextSibling);\n\tthis.iframeDoc = this.iframeNode.contentWindow.document;\n\t// (Firefox requires us to put some empty content in the iframe)\n\tthis.iframeDoc.open();\n\tthis.iframeDoc.write(\"\");\n\tthis.iframeDoc.close();\n\t// Style the iframe\n\tthis.iframeNode.className = this.dummyTextArea.className;\n\tthis.iframeNode.style.border = \"none\";\n\tthis.iframeNode.style.padding = \"0\";\n\tthis.iframeNode.style.resize = \"none\";\n\tthis.iframeDoc.body.style.margin = \"0\";\n\tthis.iframeDoc.body.style.padding = \"0\";\n\tthis.widget.domNodes.push(this.iframeNode);\n\t// Construct the textarea or input node\n\tvar tag = this.widget.editTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"input\";\n\t}\n\tthis.domNode = this.iframeDoc.createElement(tag);\n\t// Set the text\n\tif(this.widget.editTag === \"textarea\") {\n\t\tthis.domNode.appendChild(this.iframeDoc.createTextNode(this.value));\n\t} else {\n\t\tthis.domNode.value = this.value;\n\t}\n\t// Set the attributes\n\tif(this.widget.editType) {\n\t\tthis.domNode.setAttribute(\"type\",this.widget.editType);\n\t}\n\tif(this.widget.editPlaceholder) {\n\t\tthis.domNode.setAttribute(\"placeholder\",this.widget.editPlaceholder);\n\t}\n\tif(this.widget.editSize) {\n\t\tthis.domNode.setAttribute(\"size\",this.widget.editSize);\n\t}\n\tif(this.widget.editRows) {\n\t\tthis.domNode.setAttribute(\"rows\",this.widget.editRows);\n\t}\n\t// Copy the styles from the dummy textarea\n\tthis.copyStyles();\n\t// Add event listeners\n\t$tw.utils.addEventListeners(this.domNode,[\n\t\t{name: \"input\",handlerObject: this,handlerMethod: \"handleInputEvent\"},\n\t\t{name: \"keydown\",handlerObject: this.widget,handlerMethod: \"handleKeydownEvent\"}\n\t]);\n\t// Insert the element into the DOM\n\tthis.iframeDoc.body.appendChild(this.domNode);\n}\n\n/*\nCopy styles from the dummy text area to the textarea in the iframe\n*/\nFramedEngine.prototype.copyStyles = function() {\n\t// Copy all styles\n\t$tw.utils.copyStyles(this.dummyTextArea,this.domNode);\n\t// Override the ones that should not be set the same as the dummy textarea\n\tthis.domNode.style.display = \"block\";\n\tthis.domNode.style.width = \"100%\";\n\tthis.domNode.style.margin = \"0\";\n\t// In Chrome setting -webkit-text-fill-color overrides 
the placeholder text colour\n\tthis.domNode.style[\"-webkit-text-fill-color\"] = \"currentcolor\";\n};\n\n/*\nSet the text of the engine if it doesn't currently have focus\n*/\nFramedEngine.prototype.setText = function(text,type) {\n\tif(!this.domNode.isTiddlyWikiFakeDom) {\n\t\tif(this.domNode.ownerDocument.activeElement !== this.domNode) {\n\t\t\tthis.domNode.value = text;\n\t\t}\n\t\t// Fix the height if needed\n\t\tthis.fixHeight();\n\t}\n};\n\n/*\nGet the text of the engine\n*/\nFramedEngine.prototype.getText = function() {\n\treturn this.domNode.value;\n};\n\n/*\nFix the height of textarea to fit content\n*/\nFramedEngine.prototype.fixHeight = function() {\n\t// Make sure styles are updated\n\tthis.copyStyles();\n\t// Adjust height\n\tif(this.widget.editTag === \"textarea\") {\n\t\tif(this.widget.editAutoHeight) {\n\t\t\tif(this.domNode && !this.domNode.isTiddlyWikiFakeDom) {\n\t\t\t\tvar newHeight = $tw.utils.resizeTextAreaToFit(this.domNode,this.widget.editMinHeight);\n\t\t\t\tthis.iframeNode.style.height = (newHeight + 14) + \"px\"; // +14 for the border on the textarea\n\t\t\t}\n\t\t} else {\n\t\t\tvar fixedHeight = parseInt(this.widget.wiki.getTiddlerText(HEIGHT_VALUE_TITLE,\"400px\"),10);\n\t\t\tfixedHeight = Math.max(fixedHeight,20);\n\t\t\tthis.domNode.style.height = fixedHeight + \"px\";\n\t\t\tthis.iframeNode.style.height = (fixedHeight + 14) + \"px\";\n\t\t}\n\t}\n};\n\n/*\nFocus the engine node\n*/\nFramedEngine.prototype.focus  = function() {\n\tif(this.domNode.focus && this.domNode.select) {\n\t\tthis.domNode.focus();\n\t\tthis.domNode.select();\n\t}\n};\n\n/*\nHandle a dom \"input\" event which occurs when the text has changed\n*/\nFramedEngine.prototype.handleInputEvent = function(event) {\n\tthis.widget.saveChanges(this.getText());\n\tthis.fixHeight();\n\treturn true;\n};\n\n/*\nCreate a blank structure representing a text operation\n*/\nFramedEngine.prototype.createTextOperation = function() {\n\tvar operation = {\n\t\ttext: this.domNode.value,\n\t\tselStart: this.domNode.selectionStart,\n\t\tselEnd: this.domNode.selectionEnd,\n\t\tcutStart: null,\n\t\tcutEnd: null,\n\t\treplacement: null,\n\t\tnewSelStart: null,\n\t\tnewSelEnd: null\n\t};\n\toperation.selection = operation.text.substring(operation.selStart,operation.selEnd);\n\treturn operation;\n};\n\n/*\nExecute a text operation\n*/\nFramedEngine.prototype.executeTextOperation = function(operation) {\n\t// Perform the required changes to the text area and the underlying tiddler\n\tvar newText = operation.text;\n\tif(operation.replacement !== null) {\n\t\tnewText = operation.text.substring(0,operation.cutStart) + operation.replacement + operation.text.substring(operation.cutEnd);\n\t\t// Attempt to use a execCommand to modify the value of the control\n\t\tif(this.iframeDoc.queryCommandSupported(\"insertText\") && this.iframeDoc.queryCommandSupported(\"delete\") && !$tw.browser.isFirefox) {\n\t\t\tthis.domNode.focus();\n\t\t\tthis.domNode.setSelectionRange(operation.cutStart,operation.cutEnd);\n\t\t\tif(operation.replacement === \"\") {\n\t\t\t\tthis.iframeDoc.execCommand(\"delete\",false,\"\");\n\t\t\t} else {\n\t\t\t\tthis.iframeDoc.execCommand(\"insertText\",false,operation.replacement);\n\t\t\t}\n\t\t} else {\n\t\t\tthis.domNode.value = newText;\n\t\t}\n\t\tthis.domNode.focus();\n\t\tthis.domNode.setSelectionRange(operation.newSelStart,operation.newSelEnd);\n\t}\n\tthis.domNode.focus();\n\treturn newText;\n};\n\nexports.FramedEngine = FramedEngine;\n\n})();\n",
            "title": "$:/core/modules/editor/engines/framed.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/engines/simple.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/engines/simple.js\ntype: application/javascript\nmodule-type: library\n\nText editor engine based on a simple input or textarea tag\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HEIGHT_VALUE_TITLE = \"$:/config/TextEditor/EditorHeight/Height\";\n\nfunction SimpleEngine(options) {\n\t// Save our options\n\toptions = options || {};\n\tthis.widget = options.widget;\n\tthis.value = options.value;\n\tthis.parentNode = options.parentNode;\n\tthis.nextSibling = options.nextSibling;\n\t// Construct the textarea or input node\n\tvar tag = this.widget.editTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"input\";\n\t}\n\tthis.domNode = this.widget.document.createElement(tag);\n\t// Set the text\n\tif(this.widget.editTag === \"textarea\") {\n\t\tthis.domNode.appendChild(this.widget.document.createTextNode(this.value));\n\t} else {\n\t\tthis.domNode.value = this.value;\n\t}\n\t// Set the attributes\n\tif(this.widget.editType) {\n\t\tthis.domNode.setAttribute(\"type\",this.widget.editType);\n\t}\n\tif(this.widget.editPlaceholder) {\n\t\tthis.domNode.setAttribute(\"placeholder\",this.widget.editPlaceholder);\n\t}\n\tif(this.widget.editSize) {\n\t\tthis.domNode.setAttribute(\"size\",this.widget.editSize);\n\t}\n\tif(this.widget.editRows) {\n\t\tthis.domNode.setAttribute(\"rows\",this.widget.editRows);\n\t}\n\tif(this.widget.editClass) {\n\t\tthis.domNode.className = this.widget.editClass;\n\t}\n\t// Add an input event handler\n\t$tw.utils.addEventListeners(this.domNode,[\n\t\t{name: \"focus\", handlerObject: this, handlerMethod: \"handleFocusEvent\"},\n\t\t{name: \"input\", handlerObject: this, handlerMethod: \"handleInputEvent\"}\n\t]);\n\t// Insert the element into the DOM\n\tthis.parentNode.insertBefore(this.domNode,this.nextSibling);\n\tthis.widget.domNodes.push(this.domNode);\n}\n\n/*\nSet the text of the engine if it doesn't currently have focus\n*/\nSimpleEngine.prototype.setText = function(text,type) {\n\tif(!this.domNode.isTiddlyWikiFakeDom) {\n\t\tif(this.domNode.ownerDocument.activeElement !== this.domNode) {\n\t\t\tthis.domNode.value = text;\n\t\t}\n\t\t// Fix the height if needed\n\t\tthis.fixHeight();\n\t}\n};\n\n/*\nGet the text of the engine\n*/\nSimpleEngine.prototype.getText = function() {\n\treturn this.domNode.value;\n};\n\n/*\nFix the height of textarea to fit content\n*/\nSimpleEngine.prototype.fixHeight = function() {\n\tif(this.widget.editTag === \"textarea\") {\n\t\tif(this.widget.editAutoHeight) {\n\t\t\tif(this.domNode && !this.domNode.isTiddlyWikiFakeDom) {\n\t\t\t\t$tw.utils.resizeTextAreaToFit(this.domNode,this.widget.editMinHeight);\n\t\t\t}\n\t\t} else {\n\t\t\tvar fixedHeight = parseInt(this.widget.wiki.getTiddlerText(HEIGHT_VALUE_TITLE,\"400px\"),10);\n\t\t\tfixedHeight = Math.max(fixedHeight,20);\n\t\t\tthis.domNode.style.height = fixedHeight + \"px\";\n\t\t}\n\t}\n};\n\n/*\nFocus the engine node\n*/\nSimpleEngine.prototype.focus  = function() {\n\tif(this.domNode.focus && this.domNode.select) {\n\t\tthis.domNode.focus();\n\t\tthis.domNode.select();\n\t}\n};\n\n/*\nHandle a dom \"input\" event which occurs when the text has changed\n*/\nSimpleEngine.prototype.handleInputEvent = function(event) {\n\tthis.widget.saveChanges(this.getText());\n\tthis.fixHeight();\n\treturn true;\n};\n\n/*\nHandle a dom \"focus\" event\n*/\nSimpleEngine.prototype.handleFocusEvent = function(event) {\n\tif(this.widget.editFocusPopup) 
{\n\t\t$tw.popup.triggerPopup({\n\t\t\tdomNode: this.domNode,\n\t\t\ttitle: this.widget.editFocusPopup,\n\t\t\twiki: this.widget.wiki,\n\t\t\tforce: true\n\t\t});\n\t}\n\treturn true;\n};\n\n/*\nCreate a blank structure representing a text operation\n*/\nSimpleEngine.prototype.createTextOperation = function() {\n\treturn null;\n};\n\n/*\nExecute a text operation\n*/\nSimpleEngine.prototype.executeTextOperation = function(operation) {\n};\n\nexports.SimpleEngine = SimpleEngine;\n\n})();\n",
            "title": "$:/core/modules/editor/engines/simple.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/factory.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/factory.js\ntype: application/javascript\nmodule-type: library\n\nFactory for constructing text editor widgets with specified engines for the toolbar and non-toolbar cases\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar DEFAULT_MIN_TEXT_AREA_HEIGHT = \"100px\"; // Minimum height of textareas in pixels\n\n// Configuration tiddlers\nvar HEIGHT_MODE_TITLE = \"$:/config/TextEditor/EditorHeight/Mode\";\nvar ENABLE_TOOLBAR_TITLE = \"$:/config/TextEditor/EnableToolbar\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nfunction editTextWidgetFactory(toolbarEngine,nonToolbarEngine) {\n\n\tvar EditTextWidget = function(parseTreeNode,options) {\n\t\t// Initialise the editor operations if they've not been done already\n\t\tif(!this.editorOperations) {\n\t\t\tEditTextWidget.prototype.editorOperations = {};\n\t\t\t$tw.modules.applyMethods(\"texteditoroperation\",this.editorOperations);\n\t\t}\n\t\tthis.initialise(parseTreeNode,options);\n\t};\n\n\t/*\n\tInherit from the base widget class\n\t*/\n\tEditTextWidget.prototype = new Widget();\n\n\t/*\n\tRender this widget into the DOM\n\t*/\n\tEditTextWidget.prototype.render = function(parent,nextSibling) {\n\t\t// Save the parent dom node\n\t\tthis.parentDomNode = parent;\n\t\t// Compute our attributes\n\t\tthis.computeAttributes();\n\t\t// Execute our logic\n\t\tthis.execute();\n\t\t// Create the wrapper for the toolbar and render its content\n\t\tif(this.editShowToolbar) {\n\t\t\tthis.toolbarNode = this.document.createElement(\"div\");\n\t\t\tthis.toolbarNode.className = \"tc-editor-toolbar\";\n\t\t\tparent.insertBefore(this.toolbarNode,nextSibling);\n\t\t\tthis.renderChildren(this.toolbarNode,null);\n\t\t\tthis.domNodes.push(this.toolbarNode);\n\t\t}\n\t\t// Create our element\n\t\tvar editInfo = this.getEditInfo(),\n\t\t\tEngine = this.editShowToolbar ? 
toolbarEngine : nonToolbarEngine;\n\t\tthis.engine = new Engine({\n\t\t\t\twidget: this,\n\t\t\t\tvalue: editInfo.value,\n\t\t\t\ttype: editInfo.type,\n\t\t\t\tparentNode: parent,\n\t\t\t\tnextSibling: nextSibling\n\t\t\t});\n\t\t// Call the postRender hook\n\t\tif(this.postRender) {\n\t\t\tthis.postRender();\n\t\t}\n\t\t// Fix height\n\t\tthis.engine.fixHeight();\n\t\t// Focus if required\n\t\tif(this.editFocus === \"true\" || this.editFocus === \"yes\") {\n\t\t\tthis.engine.focus();\n\t\t}\n\t\t// Add widget message listeners\n\t\tthis.addEventListeners([\n\t\t\t{type: \"tm-edit-text-operation\", handler: \"handleEditTextOperationMessage\"}\n\t\t]);\n\t};\n\n\t/*\n\tGet the tiddler being edited and current value\n\t*/\n\tEditTextWidget.prototype.getEditInfo = function() {\n\t\t// Get the edit value\n\t\tvar self = this,\n\t\t\tvalue,\n\t\t\ttype = \"text/plain\",\n\t\t\tupdate;\n\t\tif(this.editIndex) {\n\t\t\tvalue = this.wiki.extractTiddlerDataItem(this.editTitle,this.editIndex,this.editDefault);\n\t\t\tupdate = function(value) {\n\t\t\t\tvar data = self.wiki.getTiddlerData(self.editTitle,{});\n\t\t\t\tif(data[self.editIndex] !== value) {\n\t\t\t\t\tdata[self.editIndex] = value;\n\t\t\t\t\tself.wiki.setTiddlerData(self.editTitle,data);\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Get the current tiddler and the field name\n\t\t\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\t\t\tif(tiddler) {\n\t\t\t\t// If we've got a tiddler, the value to display is the field string value\n\t\t\t\tvalue = tiddler.getFieldString(this.editField);\n\t\t\t\tif(this.editField === \"text\") {\n\t\t\t\t\ttype = tiddler.fields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Otherwise, we need to construct a default value for the editor\n\t\t\t\tswitch(this.editField) {\n\t\t\t\t\tcase \"text\":\n\t\t\t\t\t\tvalue = \"Type the text for the tiddler '\" + this.editTitle + \"'\";\n\t\t\t\t\t\ttype = \"text/vnd.tiddlywiki\";\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"title\":\n\t\t\t\t\t\tvalue = this.editTitle;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvalue = \"\";\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tif(this.editDefault !== undefined) {\n\t\t\t\t\tvalue = this.editDefault;\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdate = function(value) {\n\t\t\t\tvar tiddler = self.wiki.getTiddler(self.editTitle),\n\t\t\t\t\tupdateFields = {\n\t\t\t\t\t\ttitle: self.editTitle\n\t\t\t\t\t};\n\t\t\t\tupdateFields[self.editField] = value;\n\t\t\t\tself.wiki.addTiddler(new $tw.Tiddler(self.wiki.getCreationFields(),tiddler,updateFields,self.wiki.getModificationFields()));\n\t\t\t};\n\t\t}\n\t\tif(this.editType) {\n\t\t\ttype = this.editType;\n\t\t}\n\t\treturn {value: value || \"\", type: type, update: update};\n\t};\n\n\t/*\n\tHandle an edit text operation message from the toolbar\n\t*/\n\tEditTextWidget.prototype.handleEditTextOperationMessage = function(event) {\n\t\t// Prepare information about the operation\n\t\tvar operation = this.engine.createTextOperation();\n\t\t// Invoke the handler for the selected operation\n\t\tvar handler = this.editorOperations[event.param];\n\t\tif(handler) {\n\t\t\thandler.call(this,event,operation);\n\t\t}\n\t\t// Execute the operation via the engine\n\t\tvar newText = this.engine.executeTextOperation(operation);\n\t\t// Fix the tiddler height and save changes\n\t\tthis.engine.fixHeight();\n\t\tthis.saveChanges(newText);\n\t};\n\n\t/*\n\tCompute the internal state of the widget\n\t*/\n\tEditTextWidget.prototype.execute = function() {\n\t\t// Get our 
parameters\n\t\tthis.editTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t\tthis.editField = this.getAttribute(\"field\",\"text\");\n\t\tthis.editIndex = this.getAttribute(\"index\");\n\t\tthis.editDefault = this.getAttribute(\"default\");\n\t\tthis.editClass = this.getAttribute(\"class\");\n\t\tthis.editPlaceholder = this.getAttribute(\"placeholder\");\n\t\tthis.editSize = this.getAttribute(\"size\");\n\t\tthis.editRows = this.getAttribute(\"rows\");\n\t\tthis.editAutoHeight = this.wiki.getTiddlerText(HEIGHT_MODE_TITLE,\"auto\");\n\t\tthis.editAutoHeight = this.getAttribute(\"autoHeight\",this.editAutoHeight === \"auto\" ? \"yes\" : \"no\") === \"yes\";\n\t\tthis.editMinHeight = this.getAttribute(\"minHeight\",DEFAULT_MIN_TEXT_AREA_HEIGHT);\n\t\tthis.editFocusPopup = this.getAttribute(\"focusPopup\");\n\t\tthis.editFocus = this.getAttribute(\"focus\");\n\t\t// Get the default editor element tag and type\n\t\tvar tag,type;\n\t\tif(this.editField === \"text\") {\n\t\t\ttag = \"textarea\";\n\t\t} else {\n\t\t\ttag = \"input\";\n\t\t\tvar fieldModule = $tw.Tiddler.fieldModules[this.editField];\n\t\t\tif(fieldModule && fieldModule.editTag) {\n\t\t\t\ttag = fieldModule.editTag;\n\t\t\t}\n\t\t\tif(fieldModule && fieldModule.editType) {\n\t\t\t\ttype = fieldModule.editType;\n\t\t\t}\n\t\t\ttype = type || \"text\";\n\t\t}\n\t\t// Get the rest of our parameters\n\t\tthis.editTag = this.getAttribute(\"tag\",tag);\n\t\tthis.editType = this.getAttribute(\"type\",type);\n\t\t// Make the child widgets\n\t\tthis.makeChildWidgets();\n\t\t// Determine whether to show the toolbar\n\t\tthis.editShowToolbar = this.wiki.getTiddlerText(ENABLE_TOOLBAR_TITLE,\"yes\");\n\t\tthis.editShowToolbar = (this.editShowToolbar === \"yes\") && !!(this.children && this.children.length > 0);\n\t};\n\n\t/*\n\tSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n\t*/\n\tEditTextWidget.prototype.refresh = function(changedTiddlers) {\n\t\tvar changedAttributes = this.computeAttributes();\n\t\t// Completely rerender if any of our attributes have changed\n\t\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes[\"default\"] || changedAttributes[\"class\"] || changedAttributes.placeholder || changedAttributes.size || changedAttributes.autoHeight || changedAttributes.minHeight || changedAttributes.focusPopup ||  changedAttributes.rows || changedTiddlers[HEIGHT_MODE_TITLE] || changedTiddlers[ENABLE_TOOLBAR_TITLE]) {\n\t\t\tthis.refreshSelf();\n\t\t\treturn true;\n\t\t} else if(changedTiddlers[this.editTitle]) {\n\t\t\tvar editInfo = this.getEditInfo();\n\t\t\tthis.updateEditor(editInfo.value,editInfo.type);\n\t\t}\n\t\tthis.engine.fixHeight();\n\t\tif(this.editShowToolbar) {\n\t\t\treturn this.refreshChildren(changedTiddlers);\t\t\t\n\t\t} else {\n\t\t\treturn false;\n\t\t}\n\t};\n\n\t/*\n\tUpdate the editor with new text. 
This method is separate from updateEditorDomNode()\n\tso that subclasses can override updateEditor() and still use updateEditorDomNode()\n\t*/\n\tEditTextWidget.prototype.updateEditor = function(text,type) {\n\t\tthis.updateEditorDomNode(text,type);\n\t};\n\n\t/*\n\tUpdate the editor dom node with new text\n\t*/\n\tEditTextWidget.prototype.updateEditorDomNode = function(text,type) {\n\t\tthis.engine.setText(text,type);\n\t};\n\n\t/*\n\tSave changes back to the tiddler store\n\t*/\n\tEditTextWidget.prototype.saveChanges = function(text) {\n\t\tvar editInfo = this.getEditInfo();\n\t\tif(text !== editInfo.value) {\n\t\t\teditInfo.update(text);\n\t\t}\n\t};\n\n\t/*\n\tHandle a dom \"keydown\" event, which we'll bubble up to our container for the keyboard widgets benefit\n\t*/\n\tEditTextWidget.prototype.handleKeydownEvent = function(event) {\n\t\t// Check for a keyboard shortcut\n\t\tif(this.toolbarNode) {\n\t\t\tvar shortcutElements = this.toolbarNode.querySelectorAll(\"[data-tw-keyboard-shortcut]\");\n\t\t\tfor(var index=0; index<shortcutElements.length; index++) {\n\t\t\t\tvar el = shortcutElements[index],\n\t\t\t\t\tshortcutData = el.getAttribute(\"data-tw-keyboard-shortcut\"),\n\t\t\t\t\tkeyInfoArray = $tw.keyboardManager.parseKeyDescriptors(shortcutData,{\n\t\t\t\t\t\twiki: this.wiki\n\t\t\t\t\t});\n\t\t\t\tif($tw.keyboardManager.checkKeyDescriptors(event,keyInfoArray)) {\n\t\t\t\t\tvar clickEvent = this.document.createEvent(\"Events\");\n\t\t\t\t    clickEvent.initEvent(\"click\",true,false);\n\t\t\t\t    el.dispatchEvent(clickEvent);\n\t\t\t\t\tevent.preventDefault();\n\t\t\t\t\tevent.stopPropagation();\n\t\t\t\t\treturn true;\t\t\t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Propogate the event to the container\n\t\tif(this.propogateKeydownEvent(event)) {\n\t\t\t// Ignore the keydown if it was already handled\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t\treturn true;\n\t\t}\n\t\t// Otherwise, process the keydown normally\n\t\treturn false;\n\t};\n\n\t/*\n\tPropogate keydown events to our container for the keyboard widgets benefit\n\t*/\n\tEditTextWidget.prototype.propogateKeydownEvent = function(event) {\n\t\tvar newEvent = this.document.createEventObject ? this.document.createEventObject() : this.document.createEvent(\"Events\");\n\t\tif(newEvent.initEvent) {\n\t\t\tnewEvent.initEvent(\"keydown\", true, true);\n\t\t}\n\t\tnewEvent.keyCode = event.keyCode;\n\t\tnewEvent.which = event.which;\n\t\tnewEvent.metaKey = event.metaKey;\n\t\tnewEvent.ctrlKey = event.ctrlKey;\n\t\tnewEvent.altKey = event.altKey;\n\t\tnewEvent.shiftKey = event.shiftKey;\n\t\treturn !this.parentDomNode.dispatchEvent(newEvent);\n\t};\n\n\treturn EditTextWidget;\n\n}\n\nexports.editTextWidgetFactory = editTextWidgetFactory;\n\n})();\n",
            "title": "$:/core/modules/editor/factory.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/operations/bitmap/clear.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/bitmap/clear.js\ntype: application/javascript\nmodule-type: bitmapeditoroperation\n\nBitmap editor operation to clear the image\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"clear\"] = function(event) {\n\tvar ctx = this.canvasDomNode.getContext(\"2d\");\n\tctx.globalAlpha = 1;\n\tctx.fillStyle = event.paramObject.colour || \"white\";\n\tctx.fillRect(0,0,this.canvasDomNode.width,this.canvasDomNode.height);\n\t// Save changes\n\tthis.strokeEnd();\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/bitmap/clear.js",
            "type": "application/javascript",
            "module-type": "bitmapeditoroperation"
        },
        "$:/core/modules/editor/operations/bitmap/resize.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/bitmap/resize.js\ntype: application/javascript\nmodule-type: bitmapeditoroperation\n\nBitmap editor operation to resize the image\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"resize\"] = function(event) {\n\t// Get the new width\n\tvar newWidth = parseInt(event.paramObject.width || this.canvasDomNode.width,10),\n\t\tnewHeight = parseInt(event.paramObject.height || this.canvasDomNode.height,10);\n\t// Update if necessary\n\tif(newWidth > 0 && newHeight > 0 && !(newWidth === this.currCanvas.width && newHeight === this.currCanvas.height)) {\n\t\tthis.changeCanvasSize(newWidth,newHeight);\n\t}\n\t// Update the input controls\n\tthis.refreshToolbar();\n\t// Save the image into the tiddler\n\tthis.saveChanges();\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/bitmap/resize.js",
            "type": "application/javascript",
            "module-type": "bitmapeditoroperation"
        },
        "$:/core/modules/editor/operations/text/excise.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/excise.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to excise the selection to a new tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"excise\"] = function(event,operation) {\n\tvar editTiddler = this.wiki.getTiddler(this.editTitle),\n\t\teditTiddlerTitle = this.editTitle;\n\tif(editTiddler && editTiddler.fields[\"draft.of\"]) {\n\t\teditTiddlerTitle = editTiddler.fields[\"draft.of\"];\n\t}\n\tvar excisionTitle = event.paramObject.title || this.wiki.generateNewTitle(\"New Excision\");\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\tthis.wiki.getCreationFields(),\n\t\tthis.wiki.getModificationFields(),\n\t\t{\n\t\t\ttitle: excisionTitle,\n\t\t\ttext: operation.selection,\n\t\t\ttags: event.paramObject.tagnew === \"yes\" ?  [editTiddlerTitle] : []\n\t\t}\n\t));\n\toperation.replacement = excisionTitle;\n\tswitch(event.paramObject.type || \"transclude\") {\n\t\tcase \"transclude\":\n\t\t\toperation.replacement = \"{{\" + operation.replacement+ \"}}\";\n\t\t\tbreak;\n\t\tcase \"link\":\n\t\t\toperation.replacement = \"[[\" + operation.replacement+ \"]]\";\n\t\t\tbreak;\n\t\tcase \"macro\":\n\t\t\toperation.replacement = \"<<\" + (event.paramObject.macro || \"translink\") + \" \\\"\\\"\\\"\" + operation.replacement + \"\\\"\\\"\\\">>\";\n\t\t\tbreak;\n\t}\n\toperation.cutStart = operation.selStart;\n\toperation.cutEnd = operation.selEnd;\n\toperation.newSelStart = operation.selStart;\n\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/excise.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/make-link.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/make-link.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to make a link\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"make-link\"] = function(event,operation) {\n\tif(operation.selection) {\n\t\toperation.replacement = \"[[\" + operation.selection + \"|\" + event.paramObject.text + \"]]\";\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t} else {\n\t\toperation.replacement = \"[[\" + event.paramObject.text + \"]]\";\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t}\n\toperation.newSelStart = operation.selStart + operation.replacement.length;\n\toperation.newSelEnd = operation.newSelStart;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/make-link.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/prefix-lines.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/prefix-lines.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to add a prefix to the selected lines\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"prefix-lines\"] = function(event,operation) {\n\t// Cut just past the preceding line break, or the start of the text\n\toperation.cutStart = $tw.utils.findPrecedingLineBreak(operation.text,operation.selStart);\n\t// Cut to just past the following line break, or to the end of the text\n\toperation.cutEnd = $tw.utils.findFollowingLineBreak(operation.text,operation.selEnd);\n\t// Compose the required prefix\n\tvar prefix = $tw.utils.repeat(event.paramObject.character,event.paramObject.count);\n\t// Process each line\n\tvar lines = operation.text.substring(operation.cutStart,operation.cutEnd).split(/\\r?\\n/mg);\n\t$tw.utils.each(lines,function(line,index) {\n\t\t// Remove and count any existing prefix characters\n\t\tvar count = 0;\n\t\twhile(line.charAt(0) === event.paramObject.character) {\n\t\t\tline = line.substring(1);\n\t\t\tcount++;\n\t\t}\n\t\t// Remove any whitespace\n\t\twhile(line.charAt(0) === \" \") {\n\t\t\tline = line.substring(1);\n\t\t}\n\t\t// We're done if we removed the exact required prefix, otherwise add it\n\t\tif(count !== event.paramObject.count) {\n\t\t\t// Apply the prefix\n\t\t\tline =  prefix + \" \" + line;\n\t\t}\n\t\t// Save the modified line\n\t\tlines[index] = line;\n\t});\n\t// Stitch the replacement text together and set the selection\n\toperation.replacement = lines.join(\"\\n\");\n\tif(lines.length === 1) {\n\t\toperation.newSelStart = operation.cutStart + operation.replacement.length;\n\t\toperation.newSelEnd = operation.newSelStart;\n\t} else {\n\t\toperation.newSelStart = operation.cutStart;\n\t\toperation.newSelEnd = operation.newSelStart + operation.replacement.length;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/prefix-lines.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/replace-all.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/replace-all.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to replace the entire text\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"replace-all\"] = function(event,operation) {\n\toperation.cutStart = 0;\n\toperation.cutEnd = operation.text.length;\n\toperation.replacement = event.paramObject.text;\n\toperation.newSelStart = 0;\n\toperation.newSelEnd = operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/replace-all.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/replace-selection.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/replace-selection.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to replace the selection\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"replace-selection\"] = function(event,operation) {\n\toperation.replacement = event.paramObject.text;\n\toperation.cutStart = operation.selStart;\n\toperation.cutEnd = operation.selEnd;\n\toperation.newSelStart = operation.selStart;\n\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/replace-selection.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/wrap-lines.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/wrap-lines.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to wrap the selected lines with a prefix and suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"wrap-lines\"] = function(event,operation) {\n\t// Cut just past the preceding line break, or the start of the text\n\toperation.cutStart = $tw.utils.findPrecedingLineBreak(operation.text,operation.selStart);\n\t// Cut to just past the following line break, or to the end of the text\n\toperation.cutEnd = $tw.utils.findFollowingLineBreak(operation.text,operation.selEnd);\n\t// Add the prefix and suffix\n\toperation.replacement = event.paramObject.prefix + \"\\n\" +\n\t\t\t\toperation.text.substring(operation.cutStart,operation.cutEnd) + \"\\n\" +\n\t\t\t\tevent.paramObject.suffix + \"\\n\";\n\toperation.newSelStart = operation.cutStart + event.paramObject.prefix.length + 1;\n\toperation.newSelEnd = operation.newSelStart + (operation.cutEnd - operation.cutStart);\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/wrap-lines.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/wrap-selection.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/wrap-selection.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to wrap the selection with the specified prefix and suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"wrap-selection\"] = function(event,operation) {\n\tif(operation.selStart === operation.selEnd) {\n\t\t// No selection; check if we're within the prefix/suffix\n\t\tif(operation.text.substring(operation.selStart - event.paramObject.prefix.length,operation.selStart + event.paramObject.suffix.length) === event.paramObject.prefix + event.paramObject.suffix) {\n\t\t\t// Remove the prefix and suffix unless they comprise the entire text\n\t\t\tif(operation.selStart > event.paramObject.prefix.length || (operation.selEnd + event.paramObject.suffix.length) < operation.text.length ) {\n\t\t\t\toperation.cutStart = operation.selStart - event.paramObject.prefix.length;\n\t\t\t\toperation.cutEnd = operation.selEnd + event.paramObject.suffix.length;\n\t\t\t\toperation.replacement = \"\";\n\t\t\t\toperation.newSelStart = operation.cutStart;\n\t\t\t\toperation.newSelEnd = operation.newSelStart;\n\t\t\t}\n\t\t} else {\n\t\t\t// Wrap the cursor instead\n\t\t\toperation.cutStart = operation.selStart;\n\t\t\toperation.cutEnd = operation.selEnd;\n\t\t\toperation.replacement = event.paramObject.prefix + event.paramObject.suffix;\n\t\t\toperation.newSelStart = operation.selStart + event.paramObject.prefix.length;\n\t\t\toperation.newSelEnd = operation.newSelStart;\n\t\t}\n\t} else if(operation.text.substring(operation.selStart,operation.selStart + event.paramObject.prefix.length) === event.paramObject.prefix && operation.text.substring(operation.selEnd - event.paramObject.suffix.length,operation.selEnd) === event.paramObject.suffix) {\n\t\t// Prefix and suffix are already present, so remove them\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t\toperation.replacement = operation.selection.substring(event.paramObject.prefix.length,operation.selection.length - event.paramObject.suffix.length);\n\t\toperation.newSelStart = operation.selStart;\n\t\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n\t} else {\n\t\t// Add the prefix and suffix\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t\toperation.replacement = event.paramObject.prefix + operation.selection + event.paramObject.suffix;\n\t\toperation.newSelStart = operation.selStart;\n\t\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/wrap-selection.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/filters/addprefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/addprefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for adding a prefix to each title in the list. This is\nespecially useful in contexts where only a filter expression is allowed\nand macro substitution isn't available.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.addprefix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(operator.operand + title);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/addprefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/addsuffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/addsuffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for adding a suffix to each title in the list. This is\nespecially useful in contexts where only a filter expression is allowed\nand macro substitution isn't available.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.addsuffix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title + operator.operand);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/addsuffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/after.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/after.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler from the current list that is after the tiddler named in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.after = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\tvar index = results.indexOf(operator.operand);\n\tif(index === -1 || index > (results.length - 2)) {\n\t\treturn [];\n\t} else {\n\t\treturn [results[index + 1]];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/after.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/all/current.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/current.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[current]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.current = function(source,prefix,options) {\n\tvar currTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\");\n\tif(currTiddlerTitle) {\n\t\treturn [currTiddlerTitle];\n\t} else {\n\t\treturn [];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/current.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/missing.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/missing.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[missing]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.missing = function(source,prefix,options) {\n\treturn options.wiki.getMissingTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/missing.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/orphans.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/orphans.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[orphans]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.orphans = function(source,prefix,options) {\n\treturn options.wiki.getOrphanTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/orphans.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/shadows.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/shadows.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[shadows]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadows = function(source,prefix,options) {\n\treturn options.wiki.allShadowTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/shadows.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/tiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/tiddlers.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[tiddlers]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tiddlers = function(source,prefix,options) {\n\treturn options.wiki.allTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/tiddlers.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for selecting tiddlers\n\n[all[shadows+tiddlers]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar allFilterOperators;\n\nfunction getAllFilterOperators() {\n\tif(!allFilterOperators) {\n\t\tallFilterOperators = {};\n\t\t$tw.modules.applyMethods(\"allfilteroperator\",allFilterOperators);\n\t}\n\treturn allFilterOperators;\n}\n\n/*\nExport our filter function\n*/\nexports.all = function(source,operator,options) {\n\t// Get our suboperators\n\tvar allFilterOperators = getAllFilterOperators();\n\t// Cycle through the suboperators accumulating their results\n\tvar results = [],\n\t\tsubops = operator.operand.split(\"+\");\n\t// Check for common optimisations\n\tif(subops.length === 1 && subops[0] === \"\") {\n\t\treturn source;\n\t} else if(subops.length === 1 && subops[0] === \"tiddlers\") {\n\t\treturn options.wiki.each;\n\t} else if(subops.length === 1 && subops[0] === \"shadows\") {\n\t\treturn options.wiki.eachShadow;\n\t} else if(subops.length === 2 && subops[0] === \"tiddlers\" && subops[1] === \"shadows\") {\n\t\treturn options.wiki.eachTiddlerPlusShadows;\n\t} else if(subops.length === 2 && subops[0] === \"shadows\" && subops[1] === \"tiddlers\") {\n\t\treturn options.wiki.eachShadowPlusTiddlers;\n\t}\n\t// Do it the hard way\n\tfor(var t=0; t<subops.length; t++) {\n\t\tvar subop = allFilterOperators[subops[t]];\n\t\tif(subop) {\n\t\t\t$tw.utils.pushTop(results,subop(source,operator.prefix,options));\n\t\t}\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/backlinks.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/backlinks.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning all the backlinks from a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.backlinks = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlerBacklinks(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/backlinks.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/before.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/before.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler from the current list that is before the tiddler named in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.before = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\tvar index = results.indexOf(operator.operand);\n\tif(index <= 0) {\n\t\treturn [];\n\t} else {\n\t\treturn [results[index - 1]];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/before.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/commands.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/commands.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the commands available in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.commands = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.commands,function(commandInfo,name) {\n\t\tresults.push(name);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/commands.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/days.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/days.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects tiddlers with a specified date field within a specified date interval.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.days = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldName = operator.suffix || \"modified\",\n\t\tdayInterval = (parseInt(operator.operand,10)||0),\n\t\tdayIntervalSign = $tw.utils.sign(dayInterval),\n\t\ttargetTimeStamp = (new Date()).setHours(0,0,0,0) + 1000*60*60*24*dayInterval,\n\t\tisWithinDays = function(dateField) {\n\t\t\tvar sign = $tw.utils.sign(targetTimeStamp - (new Date(dateField)).setHours(0,0,0,0));\n\t\t\treturn sign === 0 || sign === dayIntervalSign;\n\t\t};\n\n\tif(operator.prefix === \"!\") {\n\t\ttargetTimeStamp = targetTimeStamp - 1000*60*60*24*dayIntervalSign;\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\t\tif(!isWithinDays($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\t\tif(isWithinDays($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/days.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/each.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/each.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects one tiddler for each unique value of the specified field.\nWith suffix \"list\", selects all tiddlers that are values in a specified list field.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.each = function(source,operator,options) {\n\tvar results =[] ,\n\t\tvalue,values = {},\n\t\tfield = operator.operand || \"title\";\n\tif(operator.suffix !== \"list-item\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\tvalue = (field === \"title\") ? title : tiddler.getFieldString(field);\n\t\t\t\tif(!$tw.utils.hop(values,value)) {\n\t\t\t\t\tvalues[value] = true;\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\t$tw.utils.each(\n\t\t\t\t\toptions.wiki.getTiddlerList(title,field),\n\t\t\t\t\tfunction(value) {\n\t\t\t\t\t\tif(!$tw.utils.hop(values,value)) {\n\t\t\t\t\t\t\tvalues[value] = true;\n\t\t\t\t\t\t\tresults.push(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/each.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/eachday.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/eachday.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects one tiddler for each unique day covered by the specified date field\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.eachday = function(source,operator,options) {\n\tvar results = [],\n\t\tvalues = [],\n\t\tfieldName = operator.operand || \"modified\";\n\t// Function to convert a date/time to a date integer\n\tvar toDate = function(value) {\n\t\tvalue = (new Date(value)).setHours(0,0,0,0);\n\t\treturn value+0;\n\t};\n\tsource(function(tiddler,title) {\n\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\tvar value = toDate($tw.utils.parseDate(tiddler.fields[fieldName]));\n\t\t\tif(values.indexOf(value) === -1) {\n\t\t\t\tvalues.push(value);\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/eachday.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/editiondescription.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/editiondescription.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the descriptions of the specified edition names\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.editiondescription = function(source,operator,options) {\n\tvar results = [],\n\t\teditionInfo = $tw.utils.getEditionInfo();\n\tif(editionInfo) {\n\t\tsource(function(tiddler,title) {\n\t\t\tif($tw.utils.hop(editionInfo,title)) {\n\t\t\t\tresults.push(editionInfo[title].description || \"\");\t\t\t\t\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/editiondescription.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/editions.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/editions.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the available editions in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.editions = function(source,operator,options) {\n\tvar results = [],\n\t\teditionInfo = $tw.utils.getEditionInfo();\n\tif(editionInfo) {\n\t\t$tw.utils.each(editionInfo,function(info,name) {\n\t\t\tresults.push(name);\n\t\t});\n\t}\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/editions.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/field.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/field.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for comparing fields for equality\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.field = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldname = (operator.suffix || operator.operator || \"title\").toLowerCase();\n\tif(operator.prefix === \"!\") {\n\t\tif(operator.regexp) {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && !operator.regexp.exec(text)) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t});\n\t\t} else {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && text !== operator.operand) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t} else {\n\t\tif(operator.regexp) {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && !!operator.regexp.exec(text)) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t} else {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && text === operator.operand) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/field.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/fields.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/fields.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the fields on the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.fields = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(tiddler) {\n\t\t\tfor(var fieldName in tiddler.fields) {\n\t\t\t\t$tw.utils.pushTop(results,fieldName);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/fields.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/get.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/get.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for replacing tiddler titles by the value of the field specified in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.get = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(tiddler) {\n\t\t\tvar value = tiddler.getFieldString(operator.operand);\n\t\t\tif(value) {\n\t\t\t\tresults.push(value);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/get.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/getindex.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/getindex.js\ntype: application/javascript\nmodule-type: filteroperator\n\nreturns the value at a given index of datatiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.getindex = function(source,operator,options) {\n\tvar data,title,results = [];\n\tif(operator.operand){\n\t\tsource(function(tiddler,title) {\n\t\t\ttitle = tiddler ? tiddler.fields.title : title;\n\t\t\tdata = options.wiki.extractTiddlerDataItem(tiddler,operator.operand);\n\t\t\tif(data) {\n\t\t\t\tresults.push(data);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/getindex.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/has.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/has.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a tiddler has the specified field\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.has = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!tiddler || (tiddler && (!$tw.utils.hop(tiddler.fields,operator.operand) || tiddler.fields[operator.operand] === \"\"))) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && $tw.utils.hop(tiddler.fields,operator.operand) && !(tiddler.fields[operator.operand] === \"\" || tiddler.fields[operator.operand].length === 0)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/has.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/haschanged.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/haschanged.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returns tiddlers from the list that have a non-zero changecount.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.haschanged = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.getChangeCount(title) === 0) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.getChangeCount(title) > 0) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/haschanged.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/indexes.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/indexes.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the indexes of a data tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.indexes = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar data = options.wiki.getTiddlerDataCached(title);\n\t\tif(data) {\n\t\t\t$tw.utils.pushTop(results,Object.keys(data));\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/indexes.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/is/current.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/current.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[current]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.current = function(source,prefix,options) {\n\tvar results = [],\n\t\tcurrTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\");\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title !== currTiddlerTitle) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title === currTiddlerTitle) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/current.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/image.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[image]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.image = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isImageTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isImageTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/image.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/missing.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/missing.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[missing]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.missing = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/missing.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/orphan.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/orphan.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[orphan]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.orphan = function(source,prefix,options) {\n\tvar results = [],\n\t\torphanTitles = options.wiki.getOrphanTitles();\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(orphanTitles.indexOf(title) === -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(orphanTitles.indexOf(title) !== -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/orphan.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/shadow.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/shadow.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[shadow]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadow = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isShadowTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isShadowTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/shadow.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/system.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/system.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[system]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.system = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isSystemTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isSystemTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/system.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/tag.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/tag.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[tag]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tag = function(source,prefix,options) {\n\tvar results = [],\n\t\ttagMap = options.wiki.getTagMap();\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!$tw.utils.hop(tagMap,title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif($tw.utils.hop(tagMap,title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/tag.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/tiddler.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[tiddler]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tiddler = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/tiddler.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking tiddler properties\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar isFilterOperators;\n\nfunction getIsFilterOperators() {\n\tif(!isFilterOperators) {\n\t\tisFilterOperators = {};\n\t\t$tw.modules.applyMethods(\"isfilteroperator\",isFilterOperators);\n\t}\n\treturn isFilterOperators;\n}\n\n/*\nExport our filter function\n*/\nexports.is = function(source,operator,options) {\n\t// Dispatch to the correct isfilteroperator\n\tvar isFilterOperators = getIsFilterOperators();\n\tvar isFilterOperator = isFilterOperators[operator.operand];\n\tif(isFilterOperator) {\n\t\treturn isFilterOperator(source,operator.prefix,options);\n\t} else {\n\t\treturn [$tw.language.getString(\"Error/IsFilterOperator\")];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/limit.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/limit.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for chopping the results to a specified maximum number of entries\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.limit = function(source,operator,options) {\n\tvar results = [];\n\t// Convert to an array\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\t// Slice the array if necessary\n\tvar limit = Math.min(results.length,parseInt(operator.operand,10));\n\tif(operator.prefix === \"!\") {\n\t\tresults = results.slice(-limit);\n\t} else {\n\t\tresults = results.slice(0,limit);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/limit.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/links.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/links.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning all the links from a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.links = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlerLinks(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/links.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/list.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddlers whose title is listed in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.list = function(source,operator,options) {\n\tvar results = [],\n\t\ttr = $tw.utils.parseTextReference(operator.operand),\n\t\tcurrTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\"),\n\t\tlist = options.wiki.getTiddlerList(tr.title || currTiddlerTitle,tr.field,tr.index);\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(list.indexOf(title) === -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tresults = list;\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/list.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/listed.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/listed.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all tiddlers that have the selected tiddlers in a list\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.listed = function(source,operator,options) {\n\tvar field = operator.operand || \"list\",\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.findListingsOfTiddler(title,field));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/listed.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/listops.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operators for manipulating the current selection list\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nReverse list\n*/\nexports.reverse = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.unshift(title);\n\t});\n\treturn results;\n};\n\n/*\nFirst entry/entries in list\n*/\nexports.first = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(0,count);\n};\n\n/*\nLast entry/entries in list\n*/\nexports.last = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(-count);\n};\n\n/*\nAll but the first entry/entries of the list\n*/\nexports.rest = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(count);\n};\nexports.butfirst = exports.rest;\nexports.bf = exports.rest;\n\n/*\nAll but the last entry/entries of the list\n*/\nexports.butlast = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(0,-count);\n};\nexports.bl = exports.butlast;\n\n/*\nThe nth member of the list\n*/\nexports.nth = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(count - 1,count);\n};\n\n})();\n",
            "title": "$:/core/modules/filters/listops.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/modules.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/modules.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the titles of the modules of a given type in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.modules = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.each($tw.modules.types[title],function(moduleInfo,moduleName) {\n\t\t\tresults.push(moduleName);\n\t\t});\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/modules.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/moduletypes.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/moduletypes.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the module types in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.moduletypes = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.modules.types,function(moduleInfo,type) {\n\t\tresults.push(type);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/moduletypes.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/next.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/next.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler whose title occurs next in the list supplied in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.next = function(source,operator,options) {\n\tvar results = [],\n\t\tlist = options.wiki.getTiddlerList(operator.operand);\n\tsource(function(tiddler,title) {\n\t\tvar match = list.indexOf(title);\n\t\t// increment match and then test if result is in range\n\t\tmatch++;\n\t\tif(match > 0 && match < list.length) {\n\t\t\tresults.push(list[match]);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/next.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/plugintiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/plugintiddlers.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the titles of the shadow tiddlers within a plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.plugintiddlers = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar pluginInfo = options.wiki.getPluginInfo(title) || options.wiki.getTiddlerDataCached(title,{tiddlers:[]});\n\t\tif(pluginInfo && pluginInfo.tiddlers) {\n\t\t\t$tw.utils.each(pluginInfo.tiddlers,function(fields,title) {\n\t\t\t\tresults.push(title);\n\t\t\t});\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/plugintiddlers.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/prefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/prefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a title starts with a prefix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.prefix = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(0,operator.operand.length) !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(0,operator.operand.length) === operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/prefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/previous.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/previous.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler whose title occurs immediately prior in the list supplied in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.previous = function(source,operator,options) {\n\tvar results = [],\n\t\tlist = options.wiki.getTiddlerList(operator.operand);\n\tsource(function(tiddler,title) {\n\t\tvar match = list.indexOf(title);\n\t\t// increment match and then test if result is in range\n\t\tmatch--;\n\t\tif(match >= 0) {\n\t\t\tresults.push(list[match]);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/previous.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/regexp.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/regexp.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for regexp matching\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.regexp = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldname = (operator.suffix || \"title\").toLowerCase(),\n\t\tregexpString, regexp, flags = \"\", match,\n\t\tgetFieldString = function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\treturn tiddler.getFieldString(fieldname);\n\t\t\t} else if(fieldname === \"title\") {\n\t\t\t\treturn title;\n\t\t\t} else {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t};\n\t// Process flags and construct regexp\n\tregexpString = operator.operand;\n\tmatch = /^\\(\\?([gim]+)\\)/.exec(regexpString);\n\tif(match) {\n\t\tflags = match[1];\n\t\tregexpString = regexpString.substr(match[0].length);\n\t} else {\n\t\tmatch = /\\(\\?([gim]+)\\)$/.exec(regexpString);\n\t\tif(match) {\n\t\t\tflags = match[1];\n\t\t\tregexpString = regexpString.substr(0,regexpString.length - match[0].length);\n\t\t}\n\t}\n\ttry {\n\t\tregexp = new RegExp(regexpString,flags);\n\t} catch(e) {\n\t\treturn [\"\" + e];\n\t}\n\t// Process the incoming tiddlers\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tvar text = getFieldString(tiddler,title);\n\t\t\tif(text !== null) {\n\t\t\t\tif(!regexp.exec(text)) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tvar text = getFieldString(tiddler,title);\n\t\t\tif(text !== null) {\n\t\t\t\tif(!!regexp.exec(text)) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/regexp.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/removeprefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/removeprefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for removing a prefix from each title in the list. Titles that do not start with the prefix are removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.removeprefix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(title.substr(0,operator.operand.length) === operator.operand) {\n\t\t\tresults.push(title.substr(operator.operand.length));\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/removeprefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/removesuffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/removesuffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for removing a suffix from each title in the list. Titles that do not end with the suffix are removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.removesuffix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(title.substr(-operator.operand.length) === operator.operand) {\n\t\t\tresults.push(title.substr(0,title.length - operator.operand.length));\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/removesuffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/sameday.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/sameday.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects tiddlers with a modified date field on the same day as the provided value.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.sameday = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldName = operator.suffix || \"modified\",\n\t\ttargetDate = (new Date($tw.utils.parseDate(operator.operand))).setHours(0,0,0,0);\n\t// Function to convert a date/time to a date integer\n\tvar isSameDay = function(dateField) {\n\t\t\treturn (new Date(dateField)).setHours(0,0,0,0) === targetDate;\n\t\t};\n\tsource(function(tiddler,title) {\n\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\tif(isSameDay($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/sameday.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/search.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/search.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for searching for the text in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.search = function(source,operator,options) {\n\tvar invert = operator.prefix === \"!\";\n\tif(operator.suffix) {\n\t\treturn options.wiki.search(operator.operand,{\n\t\t\tsource: source,\n\t\t\tinvert: invert,\n\t\t\tfield: operator.suffix\n\t\t});\n\t} else {\n\t\treturn options.wiki.search(operator.operand,{\n\t\t\tsource: source,\n\t\t\tinvert: invert\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/search.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/shadowsource.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/shadowsource.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the source plugins for shadow tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadowsource = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar source = options.wiki.getShadowSource(title);\n\t\tif(source) {\n\t\t\t$tw.utils.pushTop(results,source);\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/shadowsource.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/sort.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/sort.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for sorting\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.sort = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",false,false);\n\treturn results;\n};\n\nexports.nsort = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",false,true);\n\treturn results;\n};\n\nexports.sortcs = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",true,false);\n\treturn results;\n};\n\nexports.nsortcs = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",true,true);\n\treturn results;\n};\n\nvar prepare_results = function (source) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/sort.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/splitbefore.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/splitbefore.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that splits each result on the first occurance of the specified separator and returns the unique values.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.splitbefore = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar parts = title.split(operator.operand);\n\t\tif(parts.length === 1) {\n\t\t\t$tw.utils.pushTop(results,parts[0]);\n\t\t} else {\n\t\t\t$tw.utils.pushTop(results,parts[0] + operator.operand);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/splitbefore.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/storyviews.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/storyviews.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the story views in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.storyviews = function(source,operator,options) {\n\tvar results = [],\n\t\tstoryviews = {};\n\t$tw.modules.applyMethods(\"storyview\",storyviews);\n\t$tw.utils.each(storyviews,function(info,name) {\n\t\tresults.push(name);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/storyviews.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/suffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/suffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a title ends with a suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.suffix = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(-operator.operand.length) !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(-operator.operand.length) === operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/suffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tag.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tag.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking for the presence of a tag\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tag = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && !tiddler.hasTag(operator.operand)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.hasTag(operator.operand)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t\tresults = options.wiki.sortByList(results,operator.operand);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tag.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tagging.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tagging.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all tiddlers that are tagged with the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tagging = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlersWithTag(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tagging.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tags.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tags.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all the tags of the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tags = function(source,operator,options) {\n\tvar tags = {};\n\tsource(function(tiddler,title) {\n\t\tvar t, length;\n\t\tif(tiddler && tiddler.fields.tags) {\n\t\t\tfor(t=0, length=tiddler.fields.tags.length; t<length; t++) {\n\t\t\t\ttags[tiddler.fields.tags[t]] = true;\n\t\t\t}\n\t\t}\n\t});\n\treturn Object.keys(tags);\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tags.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/title.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/title.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for comparing title fields for equality\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.title = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields.title !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tresults.push(operator.operand);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/title.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/untagged.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/untagged.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all the selected tiddlers that are untagged\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.untagged = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && $tw.utils.isArray(tiddler.fields.tags) && tiddler.fields.tags.length > 0) {\n\t\t\t\t$tw.utils.pushTop(results,title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!tiddler || !tiddler.hasField(\"tags\") || ($tw.utils.isArray(tiddler.fields.tags) && tiddler.fields.tags.length === 0)) {\n\t\t\t\t$tw.utils.pushTop(results,title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/untagged.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/wikiparserrules.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/wikiparserrules.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the wiki parser rules in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.wikiparserrules = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.modules.types.wikirule,function(mod) {\n\t\tvar exp = mod.exports;\n\t\tif(exp.types[operator.operand]) {\n\t\t\tresults.push(exp.name);\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/wikiparserrules.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/x-listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/x-listops.js\ntype: application/javascript\nmodule-type: filteroperator\n\nExtended filter operators to manipulate the current list.\n\n\\*/\n(function () {\n\n    /*jslint node: true, browser: true */\n    /*global $tw: false */\n    \"use strict\";\n\n    /*\n    Fetch titles from the current list\n    */\n    var prepare_results = function (source) {\n    var results = [];\n        source(function (tiddler, title) {\n            results.push(title);\n        });\n        return results;\n    };\n\n    /*\n    Moves a number of items from the tail of the current list before the item named in the operand\n    */\n    exports.putbefore = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -1) :\n            results.slice(0, index).concat(results.slice(-count)).concat(results.slice(index, -count));\n    };\n\n    /*\n    Moves a number of items from the tail of the current list after the item named in the operand\n    */\n    exports.putafter = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -1) :\n            results.slice(0, index + 1).concat(results.slice(-count)).concat(results.slice(index + 1, -count));\n    };\n\n    /*\n    Replaces the item named in the operand with a number of items from the tail of the current list\n    */\n    exports.replace = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -count) :\n            results.slice(0, index).concat(results.slice(-count)).concat(results.slice(index + 1, -count));\n    };\n\n    /*\n    Moves a number of items from the tail of the current list to the head of the list\n    */\n    exports.putfirst = function (source, operator) {\n        var results = prepare_results(source),\n            count = parseInt(operator.suffix) || 1;\n        return results.slice(-count).concat(results.slice(0, -count));\n    };\n\n    /*\n    Moves a number of items from the head of the current list to the tail of the list\n    */\n    exports.putlast = function (source, operator) {\n        var results = prepare_results(source),\n            count = parseInt(operator.suffix) || 1;\n        return results.slice(count).concat(results.slice(0, count));\n    };\n\n    /*\n    Moves the item named in the operand a number of places forward or backward in the list\n    */\n    exports.move = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1,\n            marker = results.splice(index, 1);\n        return results.slice(0, index + count).concat(marker).concat(results.slice(index + count));\n    };\n\n    /*\n    Returns the items from the current list that are after the item named in the operand\n    */\n    exports.allafter = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand);\n        return (index === 
-1 || index > (results.length - 2)) ? [] :\n            (operator.suffix) ? results.slice(index) :\n            results.slice(index + 1);\n    };\n\n    /*\n    Returns the items from the current list that are before the item named in the operand\n    */\n    exports.allbefore = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand);\n        return (index <= 0) ? [] :\n            (operator.suffix) ? results.slice(0, index + 1) :\n            results.slice(0, index);\n    };\n\n    /*\n    Appends the items listed in the operand array to the tail of the current list\n    */\n    exports.append = function (source, operator) {\n        var append = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || append.length;\n        return (append.length === 0) ? results :\n            (operator.prefix) ? results.concat(append.slice(-count)) :\n            results.concat(append.slice(0, count));\n    };\n\n    /*\n    Prepends the items listed in the operand array to the head of the current list\n    */\n    exports.prepend = function (source, operator) {\n        var prepend = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || prepend.length;\n        return (prepend.length === 0) ? results :\n            (operator.prefix) ? prepend.slice(-count).concat(results) :\n            prepend.slice(0, count).concat(results);\n    };\n\n    /*\n    Returns all items from the current list except the items listed in the operand array\n    */\n    exports.remove = function (source, operator) {\n        var array = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || array.length,\n            p,\n            len,\n            index;\n        len = array.length - 1;\n        for (p = 0; p < count; ++p) {\n            if (operator.prefix) {\n                index = results.indexOf(array[len - p]);\n            } else {\n                index = results.indexOf(array[p]);\n            }\n            if (index !== -1) {\n                results.splice(index, 1);\n            }\n        }\n        return results;\n    };\n\n    /*\n    Returns all items from the current list sorted in the order of the items in the operand array\n    */\n    exports.sortby = function (source, operator) {\n        var results = prepare_results(source);\n        if (!results || results.length < 2) {\n            return results;\n        }\n        var lookup = $tw.utils.parseStringArray(operator.operand, \"true\");\n        results.sort(function (a, b) {\n            return lookup.indexOf(a) - lookup.indexOf(b);\n        });\n        return results;\n    };\n\n    /*\n    Removes all duplicate items from the current list\n    */\n    exports.unique = function (source, operator) {\n        var results = prepare_results(source);\n        var set = results.reduce(function (a, b) {\n            if (a.indexOf(b) < 0) {\n                a.push(b);\n            }\n            return a;\n        }, []);\n        return set;\n    };\n})();\n",
            "title": "$:/core/modules/filters/x-listops.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters.js\ntype: application/javascript\nmodule-type: wikimethod\n\nAdds tiddler filtering methods to the $tw.Wiki object.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nParses an operation (i.e. a run) within a filter string\n\toperators: Array of array of operator nodes into which results should be inserted\n\tfilterString: filter string\n\tp: start position within the string\nReturns the new start position, after the parsed operation\n*/\nfunction parseFilterOperation(operators,filterString,p) {\n\tvar operator, operand, bracketPos, curlyBracketPos;\n\t// Skip the starting square bracket\n\tif(filterString.charAt(p++) !== \"[\") {\n\t\tthrow \"Missing [ in filter expression\";\n\t}\n\t// Process each operator in turn\n\tdo {\n\t\toperator = {};\n\t\t// Check for an operator prefix\n\t\tif(filterString.charAt(p) === \"!\") {\n\t\t\toperator.prefix = filterString.charAt(p++);\n\t\t}\n\t\t// Get the operator name\n\t\tvar nextBracketPos = filterString.substring(p).search(/[\\[\\{<\\/]/);\n\t\tif(nextBracketPos === -1) {\n\t\t\tthrow \"Missing [ in filter expression\";\n\t\t}\n\t\tnextBracketPos += p;\n\t\tvar bracket = filterString.charAt(nextBracketPos);\n\t\toperator.operator = filterString.substring(p,nextBracketPos);\n\t\t\n\t\t// Any suffix?\n\t\tvar colon = operator.operator.indexOf(':');\n\t\tif(colon > -1) {\n\t\t\toperator.suffix = operator.operator.substring(colon + 1);\n\t\t\toperator.operator = operator.operator.substring(0,colon) || \"field\";\n\t\t}\n\t\t// Empty operator means: title\n\t\telse if(operator.operator === \"\") {\n\t\t\toperator.operator = \"title\";\n\t\t}\n\n\t\tp = nextBracketPos + 1;\n\t\tswitch (bracket) {\n\t\t\tcase \"{\": // Curly brackets\n\t\t\t\toperator.indirect = true;\n\t\t\t\tnextBracketPos = filterString.indexOf(\"}\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"[\": // Square brackets\n\t\t\t\tnextBracketPos = filterString.indexOf(\"]\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"<\": // Angle brackets\n\t\t\t\toperator.variable = true;\n\t\t\t\tnextBracketPos = filterString.indexOf(\">\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"/\": // regexp brackets\n\t\t\t\tvar rex = /^((?:[^\\\\\\/]*|\\\\.)*)\\/(?:\\(([mygi]+)\\))?/g,\n\t\t\t\t\trexMatch = rex.exec(filterString.substring(p));\n\t\t\t\tif(rexMatch) {\n\t\t\t\t\toperator.regexp = new RegExp(rexMatch[1], rexMatch[2]);\n// DEPRECATION WARNING\nconsole.log(\"WARNING: Filter\",operator.operator,\"has a deprecated regexp operand\",operator.regexp);\n\t\t\t\t\tnextBracketPos = p + rex.lastIndex - 1;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthrow \"Unterminated regular expression in filter expression\";\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t\t\n\t\tif(nextBracketPos === -1) {\n\t\t\tthrow \"Missing closing bracket in filter expression\";\n\t\t}\n\t\tif(!operator.regexp) {\n\t\t\toperator.operand = filterString.substring(p,nextBracketPos);\n\t\t}\n\t\tp = nextBracketPos + 1;\n\t\t\t\n\t\t// Push this operator\n\t\toperators.push(operator);\n\t} while(filterString.charAt(p) !== \"]\");\n\t// Skip the ending square bracket\n\tif(filterString.charAt(p++) !== \"]\") {\n\t\tthrow \"Missing ] in filter expression\";\n\t}\n\t// Return the parsing position\n\treturn p;\n}\n\n/*\nParse a filter string\n*/\nexports.parseFilter = function(filterString) {\n\tfilterString = filterString || \"\";\n\tvar results = [], // Array of arrays of operator nodes {operator:,operand:}\n\t\tp = 0, // Current position in the filter 
string\n\t\tmatch;\n\tvar whitespaceRegExp = /(\\s+)/mg,\n\t\toperandRegExp = /((?:\\+|\\-)?)(?:(\\[)|(?:\"([^\"]*)\")|(?:'([^']*)')|([^\\s\\[\\]]+))/mg;\n\twhile(p < filterString.length) {\n\t\t// Skip any whitespace\n\t\twhitespaceRegExp.lastIndex = p;\n\t\tmatch = whitespaceRegExp.exec(filterString);\n\t\tif(match && match.index === p) {\n\t\t\tp = p + match[0].length;\n\t\t}\n\t\t// Match the start of the operation\n\t\tif(p < filterString.length) {\n\t\t\toperandRegExp.lastIndex = p;\n\t\t\tmatch = operandRegExp.exec(filterString);\n\t\t\tif(!match || match.index !== p) {\n\t\t\t\tthrow $tw.language.getString(\"Error/FilterSyntax\");\n\t\t\t}\n\t\t\tvar operation = {\n\t\t\t\tprefix: \"\",\n\t\t\t\toperators: []\n\t\t\t};\n\t\t\tif(match[1]) {\n\t\t\t\toperation.prefix = match[1];\n\t\t\t\tp++;\n\t\t\t}\n\t\t\tif(match[2]) { // Opening square bracket\n\t\t\t\tp = parseFilterOperation(operation.operators,filterString,p);\n\t\t\t} else {\n\t\t\t\tp = match.index + match[0].length;\n\t\t\t}\n\t\t\tif(match[3] || match[4] || match[5]) { // Double quoted string, single quoted string or unquoted title\n\t\t\t\toperation.operators.push(\n\t\t\t\t\t{operator: \"title\", operand: match[3] || match[4] || match[5]}\n\t\t\t\t);\n\t\t\t}\n\t\t\tresults.push(operation);\n\t\t}\n\t}\n\treturn results;\n};\n\nexports.getFilterOperators = function() {\n\tif(!this.filterOperators) {\n\t\t$tw.Wiki.prototype.filterOperators = {};\n\t\t$tw.modules.applyMethods(\"filteroperator\",this.filterOperators);\n\t}\n\treturn this.filterOperators;\n};\n\nexports.filterTiddlers = function(filterString,widget,source) {\n\tvar fn = this.compileFilter(filterString);\n\treturn fn.call(this,source,widget);\n};\n\n/*\nCompile a filter into a function with the signature fn(source,widget) where:\nsource: an iterator function for the source tiddlers, called source(iterator), where iterator is called as iterator(tiddler,title)\nwidget: an optional widget node for retrieving the current tiddler etc.\n*/\nexports.compileFilter = function(filterString) {\n\tvar filterParseTree;\n\ttry {\n\t\tfilterParseTree = this.parseFilter(filterString);\n\t} catch(e) {\n\t\treturn function(source,widget) {\n\t\t\treturn [$tw.language.getString(\"Error/Filter\") + \": \" + e];\n\t\t};\n\t}\n\t// Get the hashmap of filter operator functions\n\tvar filterOperators = this.getFilterOperators();\n\t// Assemble array of functions, one for each operation\n\tvar operationFunctions = [];\n\t// Step through the operations\n\tvar self = this;\n\t$tw.utils.each(filterParseTree,function(operation) {\n\t\t// Create a function for the chain of operators in the operation\n\t\tvar operationSubFunction = function(source,widget) {\n\t\t\tvar accumulator = source,\n\t\t\t\tresults = [],\n\t\t\t\tcurrTiddlerTitle = widget && widget.getVariable(\"currentTiddler\");\n\t\t\t$tw.utils.each(operation.operators,function(operator) {\n\t\t\t\tvar operand = operator.operand,\n\t\t\t\t\toperatorFunction;\n\t\t\t\tif(!operator.operator) {\n\t\t\t\t\toperatorFunction = filterOperators.title;\n\t\t\t\t} else if(!filterOperators[operator.operator]) {\n\t\t\t\t\toperatorFunction = filterOperators.field;\n\t\t\t\t} else {\n\t\t\t\t\toperatorFunction = filterOperators[operator.operator];\n\t\t\t\t}\n\t\t\t\tif(operator.indirect) {\n\t\t\t\t\toperand = self.getTextReference(operator.operand,\"\",currTiddlerTitle);\n\t\t\t\t}\n\t\t\t\tif(operator.variable) {\n\t\t\t\t\toperand = widget.getVariable(operator.operand,{defaultValue: \"\"});\n\t\t\t\t}\n\t\t\t\t// Invoke the appropriate 
filteroperator module\n\t\t\t\tresults = operatorFunction(accumulator,{\n\t\t\t\t\t\t\toperator: operator.operator,\n\t\t\t\t\t\t\toperand: operand,\n\t\t\t\t\t\t\tprefix: operator.prefix,\n\t\t\t\t\t\t\tsuffix: operator.suffix,\n\t\t\t\t\t\t\tregexp: operator.regexp\n\t\t\t\t\t\t},{\n\t\t\t\t\t\t\twiki: self,\n\t\t\t\t\t\t\twidget: widget\n\t\t\t\t\t\t});\n\t\t\t\tif($tw.utils.isArray(results)) {\n\t\t\t\t\taccumulator = self.makeTiddlerIterator(results);\n\t\t\t\t} else {\n\t\t\t\t\taccumulator = results;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif($tw.utils.isArray(results)) {\n\t\t\t\treturn results;\n\t\t\t} else {\n\t\t\t\tvar resultArray = [];\n\t\t\t\tresults(function(tiddler,title) {\n\t\t\t\t\tresultArray.push(title);\n\t\t\t\t});\n\t\t\t\treturn resultArray;\n\t\t\t}\n\t\t};\n\t\t// Wrap the operator functions in a wrapper function that depends on the prefix\n\t\toperationFunctions.push((function() {\n\t\t\tswitch(operation.prefix || \"\") {\n\t\t\t\tcase \"\": // No prefix means that the operation is unioned into the result\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t$tw.utils.pushTop(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t\tcase \"-\": // The results of this operation are removed from the main result\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t$tw.utils.removeArrayEntries(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t\tcase \"+\": // This operation is applied to the main results so far\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t// This replaces all the elements of the array, but keeps the actual array so that references to it are preserved\n\t\t\t\t\t\tsource = self.makeTiddlerIterator(results);\n\t\t\t\t\t\tresults.splice(0,results.length);\n\t\t\t\t\t\t$tw.utils.pushTop(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t}\n\t\t})());\n\t});\n\t// Return a function that applies the operations to a source iterator of tiddler titles\n\treturn $tw.perf.measure(\"filter\",function filterFunction(source,widget) {\n\t\tif(!source) {\n\t\t\tsource = self.each;\n\t\t} else if(typeof source === \"object\") { // Array or hashmap\n\t\t\tsource = self.makeTiddlerIterator(source);\n\t\t}\n\t\tvar results = [];\n\t\t$tw.utils.each(operationFunctions,function(operationFunction) {\n\t\t\toperationFunction(results,source,widget);\n\t\t});\n\t\treturn results;\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/filters.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/core/modules/info/platform.js": {
            "text": "/*\\\ntitle: $:/core/modules/info/platform.js\ntype: application/javascript\nmodule-type: info\n\nInitialise basic platform $:/info/ tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.getInfoTiddlerFields = function() {\n\tvar mapBoolean = function(value) {return value ? \"yes\" : \"no\";},\n\t\tinfoTiddlerFields = [];\n\t// Basics\n\tinfoTiddlerFields.push({title: \"$:/info/browser\", text: mapBoolean(!!$tw.browser)});\n\tinfoTiddlerFields.push({title: \"$:/info/node\", text: mapBoolean(!!$tw.node)});\n\treturn infoTiddlerFields;\n};\n\n})();\n",
            "title": "$:/core/modules/info/platform.js",
            "type": "application/javascript",
            "module-type": "info"
        },
        "$:/core/modules/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/keyboard.js\ntype: application/javascript\nmodule-type: global\n\nKeyboard handling utilities\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar namedKeys = {\n\t\"cancel\": 3,\n\t\"help\": 6,\n\t\"backspace\": 8,\n\t\"tab\": 9,\n\t\"clear\": 12,\n\t\"return\": 13,\n\t\"enter\": 13,\n\t\"pause\": 19,\n\t\"escape\": 27,\n\t\"space\": 32,\n\t\"page_up\": 33,\n\t\"page_down\": 34,\n\t\"end\": 35,\n\t\"home\": 36,\n\t\"left\": 37,\n\t\"up\": 38,\n\t\"right\": 39,\n\t\"down\": 40,\n\t\"printscreen\": 44,\n\t\"insert\": 45,\n\t\"delete\": 46,\n\t\"0\": 48,\n\t\"1\": 49,\n\t\"2\": 50,\n\t\"3\": 51,\n\t\"4\": 52,\n\t\"5\": 53,\n\t\"6\": 54,\n\t\"7\": 55,\n\t\"8\": 56,\n\t\"9\": 57,\n\t\"firefoxsemicolon\": 59,\n\t\"firefoxequals\": 61,\n\t\"a\": 65,\n\t\"b\": 66,\n\t\"c\": 67,\n\t\"d\": 68,\n\t\"e\": 69,\n\t\"f\": 70,\n\t\"g\": 71,\n\t\"h\": 72,\n\t\"i\": 73,\n\t\"j\": 74,\n\t\"k\": 75,\n\t\"l\": 76,\n\t\"m\": 77,\n\t\"n\": 78,\n\t\"o\": 79,\n\t\"p\": 80,\n\t\"q\": 81,\n\t\"r\": 82,\n\t\"s\": 83,\n\t\"t\": 84,\n\t\"u\": 85,\n\t\"v\": 86,\n\t\"w\": 87,\n\t\"x\": 88,\n\t\"y\": 89,\n\t\"z\": 90,\n\t\"numpad0\": 96,\n\t\"numpad1\": 97,\n\t\"numpad2\": 98,\n\t\"numpad3\": 99,\n\t\"numpad4\": 100,\n\t\"numpad5\": 101,\n\t\"numpad6\": 102,\n\t\"numpad7\": 103,\n\t\"numpad8\": 104,\n\t\"numpad9\": 105,\n\t\"multiply\": 106,\n\t\"add\": 107,\n\t\"separator\": 108,\n\t\"subtract\": 109,\n\t\"decimal\": 110,\n\t\"divide\": 111,\n\t\"f1\": 112,\n\t\"f2\": 113,\n\t\"f3\": 114,\n\t\"f4\": 115,\n\t\"f5\": 116,\n\t\"f6\": 117,\n\t\"f7\": 118,\n\t\"f8\": 119,\n\t\"f9\": 120,\n\t\"f10\": 121,\n\t\"f11\": 122,\n\t\"f12\": 123,\n\t\"f13\": 124,\n\t\"f14\": 125,\n\t\"f15\": 126,\n\t\"f16\": 127,\n\t\"f17\": 128,\n\t\"f18\": 129,\n\t\"f19\": 130,\n\t\"f20\": 131,\n\t\"f21\": 132,\n\t\"f22\": 133,\n\t\"f23\": 134,\n\t\"f24\": 135,\n\t\"firefoxminus\": 173,\n\t\"semicolon\": 186,\n\t\"equals\": 187,\n\t\"comma\": 188,\n\t\"dash\": 189,\n\t\"period\": 190,\n\t\"slash\": 191,\n\t\"backquote\": 192,\n\t\"openbracket\": 219,\n\t\"backslash\": 220,\n\t\"closebracket\": 221,\n\t\"quote\": 222\n};\n\nfunction KeyboardManager(options) {\n\tvar self = this;\n\toptions = options || \"\";\n\t// Save the named key hashmap\n\tthis.namedKeys = namedKeys;\n\t// Create a reverse mapping of code to keyname\n\tthis.keyNames = [];\n\t$tw.utils.each(namedKeys,function(keyCode,name) {\n\t\tself.keyNames[keyCode] = name.substr(0,1).toUpperCase() + name.substr(1);\n\t});\n\t// Save the platform-specific name of the \"meta\" key\n\tthis.metaKeyName = $tw.platform.isMac ? 
\"cmd-\" : \"win-\";\n}\n\n/*\nReturn an array of keycodes for the modifier keys ctrl, shift, alt, meta\n*/\nKeyboardManager.prototype.getModifierKeys = function() {\n\treturn [\n\t\t16, // Shift\n\t\t17, // Ctrl\n\t\t18, // Alt\n\t\t20, // CAPS LOCK\n\t\t91, // Meta (left)\n\t\t93, // Meta (right)\n\t\t224 // Meta (Firefox)\n\t]\n};\n\n/*\nParses a key descriptor into the structure:\n{\n\tkeyCode: numeric keycode\n\tshiftKey: boolean\n\taltKey: boolean\n\tctrlKey: boolean\n\tmetaKey: boolean\n}\nKey descriptors have the following format:\n\tctrl+enter\n\tctrl+shift+alt+A\n*/\nKeyboardManager.prototype.parseKeyDescriptor = function(keyDescriptor) {\n\tvar components = keyDescriptor.split(/\\+|\\-/),\n\t\tinfo = {\n\t\t\tkeyCode: 0,\n\t\t\tshiftKey: false,\n\t\t\taltKey: false,\n\t\t\tctrlKey: false,\n\t\t\tmetaKey: false\n\t\t};\n\tfor(var t=0; t<components.length; t++) {\n\t\tvar s = components[t].toLowerCase(),\n\t\t\tc = s.charCodeAt(0);\n\t\t// Look for modifier keys\n\t\tif(s === \"ctrl\") {\n\t\t\tinfo.ctrlKey = true;\n\t\t} else if(s === \"shift\") {\n\t\t\tinfo.shiftKey = true;\n\t\t} else if(s === \"alt\") {\n\t\t\tinfo.altKey = true;\n\t\t} else if(s === \"meta\" || s === \"cmd\" || s === \"win\") {\n\t\t\tinfo.metaKey = true;\n\t\t}\n\t\t// Replace named keys with their code\n\t\tif(this.namedKeys[s]) {\n\t\t\tinfo.keyCode = this.namedKeys[s];\n\t\t}\n\t}\n\tif(info.keyCode) {\n\t\treturn info;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nParse a list of key descriptors into an array of keyInfo objects. The key descriptors can be passed as an array of strings or a space separated string\n*/\nKeyboardManager.prototype.parseKeyDescriptors = function(keyDescriptors,options) {\n\tvar self = this;\n\toptions = options || {};\n\toptions.stack = options.stack || [];\n\tvar wiki = options.wiki || $tw.wiki;\n\tif(typeof keyDescriptors === \"string\" && keyDescriptors === \"\") {\n\t\treturn [];\n\t}\n\tif(!$tw.utils.isArray(keyDescriptors)) {\n\t\tkeyDescriptors = keyDescriptors.split(\" \");\n\t}\n\tvar result = [];\n\t$tw.utils.each(keyDescriptors,function(keyDescriptor) {\n\t\t// Look for a named shortcut\n\t\tif(keyDescriptor.substr(0,2) === \"((\" && keyDescriptor.substr(-2,2) === \"))\") {\n\t\t\tif(options.stack.indexOf(keyDescriptor) === -1) {\n\t\t\t\toptions.stack.push(keyDescriptor);\n\t\t\t\tvar name = keyDescriptor.substring(2,keyDescriptor.length - 2),\n\t\t\t\t\tlookupName = function(configName) {\n\t\t\t\t\t\tvar keyDescriptors = wiki.getTiddlerText(\"$:/config/\" + configName + \"/\" + name);\n\t\t\t\t\t\tif(keyDescriptors) {\n\t\t\t\t\t\t\tresult.push.apply(result,self.parseKeyDescriptors(keyDescriptors,options));\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\tlookupName(\"shortcuts\");\n\t\t\t\tlookupName($tw.platform.isMac ? \"shortcuts-mac\" : \"shortcuts-not-mac\");\n\t\t\t\tlookupName($tw.platform.isWindows ? \"shortcuts-windows\" : \"shortcuts-not-windows\");\n\t\t\t\tlookupName($tw.platform.isLinux ? \"shortcuts-linux\" : \"shortcuts-not-linux\");\n\t\t\t}\n\t\t} else {\n\t\t\tresult.push(self.parseKeyDescriptor(keyDescriptor));\n\t\t}\n\t});\n\treturn result;\n};\n\nKeyboardManager.prototype.getPrintableShortcuts = function(keyInfoArray) {\n\tvar self = this,\n\t\tresult = [];\n\t$tw.utils.each(keyInfoArray,function(keyInfo) {\n\t\tif(keyInfo) {\n\t\t\tresult.push((keyInfo.ctrlKey ? \"ctrl-\" : \"\") + \n\t\t\t\t   (keyInfo.shiftKey ? \"shift-\" : \"\") + \n\t\t\t\t   (keyInfo.altKey ? \"alt-\" : \"\") + \n\t\t\t\t   (keyInfo.metaKey ? 
self.metaKeyName : \"\") + \n\t\t\t\t   (self.keyNames[keyInfo.keyCode]));\n\t\t}\n\t});\n\treturn result;\n}\n\nKeyboardManager.prototype.checkKeyDescriptor = function(event,keyInfo) {\n\treturn keyInfo &&\n\t\t\tevent.keyCode === keyInfo.keyCode && \n\t\t\tevent.shiftKey === keyInfo.shiftKey && \n\t\t\tevent.altKey === keyInfo.altKey && \n\t\t\tevent.ctrlKey === keyInfo.ctrlKey && \n\t\t\tevent.metaKey === keyInfo.metaKey;\n};\n\nKeyboardManager.prototype.checkKeyDescriptors = function(event,keyInfoArray) {\n\tfor(var t=0; t<keyInfoArray.length; t++) {\n\t\tif(this.checkKeyDescriptor(event,keyInfoArray[t])) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n};\n\nexports.KeyboardManager = KeyboardManager;\n\n})();\n",
            "title": "$:/core/modules/keyboard.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/language.js": {
            "text": "/*\\\ntitle: $:/core/modules/language.js\ntype: application/javascript\nmodule-type: global\n\nThe $tw.Language() manages translateable strings\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreate an instance of the language manager. Options include:\nwiki: wiki from which to retrieve translation tiddlers\n*/\nfunction Language(options) {\n\toptions = options || \"\";\n\tthis.wiki = options.wiki || $tw.wiki;\n}\n\n/*\nReturn a wikified translateable string. The title is automatically prefixed with \"$:/language/\"\nOptions include:\nvariables: optional hashmap of variables to supply to the language wikification\n*/\nLanguage.prototype.getString = function(title,options) {\n\toptions = options || {};\n\ttitle = \"$:/language/\" + title;\n\treturn this.wiki.renderTiddler(\"text/plain\",title,{variables: options.variables});\n};\n\n/*\nReturn a raw, unwikified translateable string. The title is automatically prefixed with \"$:/language/\"\n*/\nLanguage.prototype.getRawString = function(title) {\n\ttitle = \"$:/language/\" + title;\n\treturn this.wiki.getTiddlerText(title);\n};\n\nexports.Language = Language;\n\n})();\n",
            "title": "$:/core/modules/language.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/macros/changecount.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/changecount.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return the changecount for the current tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"changecount\";\n\nexports.params = [];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\treturn this.wiki.getChangeCount(this.getVariable(\"currentTiddler\")) + \"\";\n};\n\n})();\n",
            "title": "$:/core/modules/macros/changecount.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/contrastcolour.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/contrastcolour.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to choose which of two colours has the highest contrast with a base colour\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"contrastcolour\";\n\nexports.params = [\n\t{name: \"target\"},\n\t{name: \"fallbackTarget\"},\n\t{name: \"colourA\"},\n\t{name: \"colourB\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(target,fallbackTarget,colourA,colourB) {\n\tvar rgbTarget = $tw.utils.parseCSSColor(target) || $tw.utils.parseCSSColor(fallbackTarget);\n\tif(!rgbTarget) {\n\t\treturn colourA;\n\t}\n\tvar rgbColourA = $tw.utils.parseCSSColor(colourA),\n\t\trgbColourB = $tw.utils.parseCSSColor(colourB);\n\tif(rgbColourA && !rgbColourB) {\n\t\treturn rgbColourA;\n\t}\n\tif(rgbColourB && !rgbColourA) {\n\t\treturn rgbColourB;\n\t}\n\tif(!rgbColourA && !rgbColourB) {\n\t\t// If neither colour is readable, return a crude inverse of the target\n\t\treturn [255 - rgbTarget[0],255 - rgbTarget[1],255 - rgbTarget[2],rgbTarget[3]];\n\t}\n\t// Colour brightness formula derived from http://www.w3.org/WAI/ER/WD-AERT/#color-contrast\n\tvar brightnessTarget = rgbTarget[0] * 0.299 + rgbTarget[1] * 0.587 + rgbTarget[2] * 0.114,\n\t\tbrightnessA = rgbColourA[0] * 0.299 + rgbColourA[1] * 0.587 + rgbColourA[2] * 0.114,\n\t\tbrightnessB = rgbColourB[0] * 0.299 + rgbColourB[1] * 0.587 + rgbColourB[2] * 0.114;\n\treturn Math.abs(brightnessTarget - brightnessA) > Math.abs(brightnessTarget - brightnessB) ? colourA : colourB;\n};\n\n})();\n",
            "title": "$:/core/modules/macros/contrastcolour.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/csvtiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/csvtiddlers.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to output tiddlers matching a filter to CSV\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"csvtiddlers\";\n\nexports.params = [\n\t{name: \"filter\"},\n\t{name: \"format\"},\n];\n\n/*\nRun the macro\n*/\nexports.run = function(filter,format) {\n\tvar self = this,\n\t\ttiddlers = this.wiki.filterTiddlers(filter),\n\t\ttiddler,\n\t\tfields = [],\n\t\tt,f;\n\t// Collect all the fields\n\tfor(t=0;t<tiddlers.length; t++) {\n\t\ttiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\tfor(f in tiddler.fields) {\n\t\t\tif(fields.indexOf(f) === -1) {\n\t\t\t\tfields.push(f);\n\t\t\t}\n\t\t}\n\t}\n\t// Sort the fields and bring the standard ones to the front\n\tfields.sort();\n\t\"title text modified modifier created creator\".split(\" \").reverse().forEach(function(value,index) {\n\t\tvar p = fields.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tfields.splice(p,1);\n\t\t\tfields.unshift(value)\n\t\t}\n\t});\n\t// Output the column headings\n\tvar output = [], row = [];\n\tfields.forEach(function(value) {\n\t\trow.push(quoteAndEscape(value))\n\t});\n\toutput.push(row.join(\",\"));\n\t// Output each tiddler\n\tfor(var t=0;t<tiddlers.length; t++) {\n\t\trow = [];\n\t\ttiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\t\tfor(f=0; f<fields.length; f++) {\n\t\t\t\trow.push(quoteAndEscape(tiddler ? tiddler.getFieldString(fields[f]) || \"\" : \"\"));\n\t\t\t}\n\t\toutput.push(row.join(\",\"));\n\t}\n\treturn output.join(\"\\n\");\n};\n\nfunction quoteAndEscape(value) {\n\treturn \"\\\"\" + value.replace(/\"/mg,\"\\\"\\\"\") + \"\\\"\";\n}\n\n})();\n",
            "title": "$:/core/modules/macros/csvtiddlers.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/displayshortcuts.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/displayshortcuts.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to display a list of keyboard shortcuts in human readable form. Notably, it resolves named shortcuts like `((bold))` to the underlying keystrokes.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"displayshortcuts\";\n\nexports.params = [\n\t{name: \"shortcuts\"},\n\t{name: \"prefix\"},\n\t{name: \"separator\"},\n\t{name: \"suffix\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(shortcuts,prefix,separator,suffix) {\n\tvar shortcutArray = $tw.keyboardManager.getPrintableShortcuts($tw.keyboardManager.parseKeyDescriptors(shortcuts,{\n\t\twiki: this.wiki\n\t}));\n\tif(shortcutArray.length > 0) {\n\t\tshortcutArray.sort(function(a,b) {\n\t\t    return a.toLowerCase().localeCompare(b.toLowerCase());\n\t\t})\n\t\treturn prefix + shortcutArray.join(separator) + suffix;\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/macros/displayshortcuts.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/dumpvariables.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/dumpvariables.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to dump all active variable values\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"dumpvariables\";\n\nexports.params = [\n];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\tvar output = [\"|!Variable |!Value |\"],\n\t\tvariables = [], variable;\n\tfor(variable in this.variables) {\n\t\tvariables.push(variable);\n\t}\n\tvariables.sort();\n\tfor(var index=0; index<variables.length; index++) {\n\t\tvar variable = variables[index];\n\t\toutput.push(\"|\" + variable + \" |<input size=50 value=<<\" + variable + \">>/> |\")\n\t}\n\treturn output.join(\"\\n\");\n};\n\n})();\n",
            "title": "$:/core/modules/macros/dumpvariables.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/jsontiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/jsontiddlers.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to output tiddlers matching a filter to JSON\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"jsontiddlers\";\n\nexports.params = [\n\t{name: \"filter\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(filter) {\n\tvar tiddlers = this.wiki.filterTiddlers(filter),\n\t\tdata = [];\n\tfor(var t=0;t<tiddlers.length; t++) {\n\t\tvar tiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\tif(tiddler) {\n\t\t\tvar fields = new Object();\n\t\t\tfor(var field in tiddler.fields) {\n\t\t\t\tfields[field] = tiddler.getFieldString(field);\n\t\t\t}\n\t\t\tdata.push(fields);\n\t\t}\n\t}\n\treturn JSON.stringify(data,null,$tw.config.preferences.jsonSpaces);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/jsontiddlers.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/makedatauri.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/makedatauri.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to convert a string of text to a data URI\n\n<<makedatauri text:\"Text to be converted\" type:\"text/vnd.tiddlywiki\">>\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"makedatauri\";\n\nexports.params = [\n\t{name: \"text\"},\n\t{name: \"type\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(text,type) {\n\treturn $tw.utils.makeDataUri(text,type);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/makedatauri.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/now.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/now.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return a formatted version of the current time\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"now\";\n\nexports.params = [\n\t{name: \"format\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(format) {\n\treturn $tw.utils.formatDateString(new Date(),format || \"0hh:0mm, DDth MMM YYYY\");\n};\n\n})();\n",
            "title": "$:/core/modules/macros/now.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/qualify.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/qualify.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to qualify a state tiddler title according\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"qualify\";\n\nexports.params = [\n\t{name: \"title\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(title) {\n\treturn title + \"-\" + this.getStateQualifier();\n};\n\n})();\n",
            "title": "$:/core/modules/macros/qualify.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/resolvepath.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/resolvepath.js\ntype: application/javascript\nmodule-type: macro\n\nResolves a relative path for an absolute rootpath.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"resolvepath\";\n\nexports.params = [\n\t{name: \"source\"},\n\t{name: \"root\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(source, root) {\n\treturn $tw.utils.resolvePath(source, root);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/resolvepath.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/version.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/version.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return the TiddlyWiki core version number\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"version\";\n\nexports.params = [];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\treturn $tw.version;\n};\n\n})();\n",
            "title": "$:/core/modules/macros/version.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/parsers/audioparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/audioparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe audio parser parses an audio tiddler into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar AudioParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"audio\",\n\t\t\tattributes: {\n\t\t\t\tcontrols: {type: \"string\", value: \"controls\"}\n\t\t\t}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t} else if(text) {\n\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"audio/ogg\"] = AudioParser;\nexports[\"audio/mpeg\"] = AudioParser;\nexports[\"audio/mp3\"] = AudioParser;\nexports[\"audio/mp4\"] = AudioParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/audioparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/csvparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/csvparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe CSV text parser processes CSV files into a table wrapped in a scrollable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar CsvParser = function(type,text,options) {\n\t// Table framework\n\tthis.tree = [{\n\t\t\"type\": \"scrollable\", \"children\": [{\n\t\t\t\"type\": \"element\", \"tag\": \"table\", \"children\": [{\n\t\t\t\t\"type\": \"element\", \"tag\": \"tbody\", \"children\": []\n\t\t\t}], \"attributes\": {\n\t\t\t\t\"class\": {\"type\": \"string\", \"value\": \"tc-csv-table\"}\n\t\t\t}\n\t\t}]\n\t}];\n\t// Split the text into lines\n\tvar lines = text.split(/\\r?\\n/mg),\n\t\ttag = \"th\";\n\tfor(var line=0; line<lines.length; line++) {\n\t\tvar lineText = lines[line];\n\t\tif(lineText) {\n\t\t\tvar row = {\n\t\t\t\t\t\"type\": \"element\", \"tag\": \"tr\", \"children\": []\n\t\t\t\t};\n\t\t\tvar columns = lineText.split(\",\");\n\t\t\tfor(var column=0; column<columns.length; column++) {\n\t\t\t\trow.children.push({\n\t\t\t\t\t\t\"type\": \"element\", \"tag\": tag, \"children\": [{\n\t\t\t\t\t\t\t\"type\": \"text\",\n\t\t\t\t\t\t\t\"text\": columns[column]\n\t\t\t\t\t\t}]\n\t\t\t\t\t});\n\t\t\t}\n\t\t\ttag = \"td\";\n\t\t\tthis.tree[0].children[0].children[0].children.push(row);\n\t\t}\n\t}\n};\n\nexports[\"text/csv\"] = CsvParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/csvparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/htmlparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/htmlparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe HTML parser displays text as raw HTML\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HtmlParser = function(type,text,options) {\n\tvar src;\n\tif(options._canonical_uri) {\n\t\tsrc = options._canonical_uri;\n\t} else if(text) {\n\t\tsrc = \"data:text/html;charset=utf-8,\" + encodeURIComponent(text);\n\t}\n\tthis.tree = [{\n\t\ttype: \"element\",\n\t\ttag: \"iframe\",\n\t\tattributes: {\n\t\t\tsrc: {type: \"string\", value: src},\n\t\t\tsandbox: {type: \"string\", value: \"\"}\n\t\t}\n\t}];\n};\n\nexports[\"text/html\"] = HtmlParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/htmlparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/imageparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/imageparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe image parser parses an image into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar ImageParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"img\",\n\t\t\tattributes: {}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t\tif(type === \"application/pdf\" || type === \".pdf\") {\n\t\t\telement.tag = \"embed\";\n\t\t}\n\t} else if(text) {\n\t\tif(type === \"application/pdf\" || type === \".pdf\") {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:application/pdf;base64,\" + text};\n\t\t\telement.tag = \"embed\";\n\t\t} else if(type === \"image/svg+xml\" || type === \".svg\") {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:image/svg+xml,\" + encodeURIComponent(text)};\n\t\t} else {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t\t}\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"image/svg+xml\"] = ImageParser;\nexports[\"image/jpg\"] = ImageParser;\nexports[\"image/jpeg\"] = ImageParser;\nexports[\"image/png\"] = ImageParser;\nexports[\"image/gif\"] = ImageParser;\nexports[\"application/pdf\"] = ImageParser;\nexports[\"image/x-icon\"] = ImageParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/imageparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/utils/parseutils.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/parseutils.js\ntype: application/javascript\nmodule-type: utils\n\nUtility functions concerned with parsing text into tokens.\n\nMost functions have the following pattern:\n\n* The parameters are:\n** `source`: the source string being parsed\n** `pos`: the current parse position within the string\n** Any further parameters are used to identify the token that is being parsed\n* The return value is:\n** null if the token was not found at the specified position\n** an object representing the token with the following standard fields:\n*** `type`: string indicating the type of the token\n*** `start`: start position of the token in the source string\n*** `end`: end position of the token in the source string\n*** Any further fields required to describe the token\n\nThe exception is `skipWhiteSpace`, which just returns the position after the whitespace.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nLook for a whitespace token. Returns null if not found, otherwise returns {type: \"whitespace\", start:, end:,}\n*/\nexports.parseWhiteSpace = function(source,pos) {\n\tvar p = pos,c;\n\twhile(true) {\n\t\tc = source.charAt(p);\n\t\tif((c === \" \") || (c === \"\\f\") || (c === \"\\n\") || (c === \"\\r\") || (c === \"\\t\") || (c === \"\\v\") || (c === \"\\u00a0\")) { // Ignores some obscure unicode spaces\n\t\t\tp++;\n\t\t} else {\n\t\t\tbreak;\n\t\t}\n\t}\n\tif(p === pos) {\n\t\treturn null;\n\t} else {\n\t\treturn {\n\t\t\ttype: \"whitespace\",\n\t\t\tstart: pos,\n\t\t\tend: p\n\t\t}\n\t}\n};\n\n/*\nConvenience wrapper for parseWhiteSpace. Returns the position after the whitespace\n*/\nexports.skipWhiteSpace = function(source,pos) {\n\tvar c;\n\twhile(true) {\n\t\tc = source.charAt(pos);\n\t\tif((c === \" \") || (c === \"\\f\") || (c === \"\\n\") || (c === \"\\r\") || (c === \"\\t\") || (c === \"\\v\") || (c === \"\\u00a0\")) { // Ignores some obscure unicode spaces\n\t\t\tpos++;\n\t\t} else {\n\t\t\treturn pos;\n\t\t}\n\t}\n};\n\n/*\nLook for a given string token. Returns null if not found, otherwise returns {type: \"token\", value:, start:, end:,}\n*/\nexports.parseTokenString = function(source,pos,token) {\n\tvar match = source.indexOf(token,pos) === pos;\n\tif(match) {\n\t\treturn {\n\t\t\ttype: \"token\",\n\t\t\tvalue: token,\n\t\t\tstart: pos,\n\t\t\tend: pos + token.length\n\t\t};\n\t}\n\treturn null;\n};\n\n/*\nLook for a token matching a regex. Returns null if not found, otherwise returns {type: \"regexp\", match:, start:, end:,}\n*/\nexports.parseTokenRegExp = function(source,pos,reToken) {\n\tvar node = {\n\t\ttype: \"regexp\",\n\t\tstart: pos\n\t};\n\treToken.lastIndex = pos;\n\tnode.match = reToken.exec(source);\n\tif(node.match && node.match.index === pos) {\n\t\tnode.end = pos + node.match[0].length;\n\t\treturn node;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLook for a string literal. Returns null if not found, otherwise returns {type: \"string\", value:, start:, end:,}\n*/\nexports.parseStringLiteral = function(source,pos) {\n\tvar node = {\n\t\ttype: \"string\",\n\t\tstart: pos\n\t};\n\tvar reString = /(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\")|(?:'([^']*)')/g;\n\treString.lastIndex = pos;\n\tvar match = reString.exec(source);\n\tif(match && match.index === pos) {\n\t\tnode.value = match[1] !== undefined ? match[1] :(\n\t\t\tmatch[2] !== undefined ? 
match[2] : match[3] \n\t\t\t\t\t);\n\t\tnode.end = pos + match[0].length;\n\t\treturn node;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLook for a macro invocation parameter. Returns null if not found, or {type: \"macro-parameter\", name:, value:, start:, end:}\n*/\nexports.parseMacroParameter = function(source,pos) {\n\tvar node = {\n\t\ttype: \"macro-parameter\",\n\t\tstart: pos\n\t};\n\t// Define our regexp\n\tvar reMacroParameter = /(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\\s>\"'=]+)))/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the parameter\n\tvar token = $tw.utils.parseTokenRegExp(source,pos,reMacroParameter);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the parameter details\n\tnode.value = token.match[2] !== undefined ? token.match[2] : (\n\t\t\t\t\ttoken.match[3] !== undefined ? token.match[3] : (\n\t\t\t\t\t\ttoken.match[4] !== undefined ? token.match[4] : (\n\t\t\t\t\t\t\ttoken.match[5] !== undefined ? token.match[5] : (\n\t\t\t\t\t\t\t\ttoken.match[6] !== undefined ? token.match[6] : (\n\t\t\t\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t);\n\tif(token.match[1]) {\n\t\tnode.name = token.match[1];\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n/*\nLook for a macro invocation. Returns null if not found, or {type: \"macrocall\", name:, parameters:, start:, end:}\n*/\nexports.parseMacroInvocation = function(source,pos) {\n\tvar node = {\n\t\ttype: \"macrocall\",\n\t\tstart: pos,\n\t\tparams: []\n\t};\n\t// Define our regexps\n\tvar reMacroName = /([^\\s>\"'=]+)/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a double less than sign\n\tvar token = $tw.utils.parseTokenString(source,pos,\"<<\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the macro name\n\tvar name = $tw.utils.parseTokenRegExp(source,pos,reMacroName);\n\tif(!name) {\n\t\treturn null;\n\t}\n\tnode.name = name.match[1];\n\tpos = name.end;\n\t// Process parameters\n\tvar parameter = $tw.utils.parseMacroParameter(source,pos);\n\twhile(parameter) {\n\t\tnode.params.push(parameter);\n\t\tpos = parameter.end;\n\t\t// Get the next parameter\n\t\tparameter = $tw.utils.parseMacroParameter(source,pos);\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a double greater than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\">>\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n/*\nLook for an HTML attribute definition. 
Returns null if not found, otherwise returns {type: \"attribute\", name:, valueType: \"string|indirect|macro\", value:, start:, end:,}\n*/\nexports.parseAttribute = function(source,pos) {\n\tvar node = {\n\t\tstart: pos\n\t};\n\t// Define our regexps\n\tvar reAttributeName = /([^\\/\\s>\"'=]+)/g,\n\t\treUnquotedAttribute = /([^\\/\\s<>\"'=]+)/g,\n\t\treIndirectValue = /\\{\\{([^\\}]+)\\}\\}/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Get the attribute name\n\tvar name = $tw.utils.parseTokenRegExp(source,pos,reAttributeName);\n\tif(!name) {\n\t\treturn null;\n\t}\n\tnode.name = name.match[1];\n\tpos = name.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for an equals sign\n\tvar token = $tw.utils.parseTokenString(source,pos,\"=\");\n\tif(token) {\n\t\tpos = token.end;\n\t\t// Skip whitespace\n\t\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t\t// Look for a string literal\n\t\tvar stringLiteral = $tw.utils.parseStringLiteral(source,pos);\n\t\tif(stringLiteral) {\n\t\t\tpos = stringLiteral.end;\n\t\t\tnode.type = \"string\";\n\t\t\tnode.value = stringLiteral.value;\n\t\t} else {\n\t\t\t// Look for an indirect value\n\t\t\tvar indirectValue = $tw.utils.parseTokenRegExp(source,pos,reIndirectValue);\n\t\t\tif(indirectValue) {\n\t\t\t\tpos = indirectValue.end;\n\t\t\t\tnode.type = \"indirect\";\n\t\t\t\tnode.textReference = indirectValue.match[1];\n\t\t\t} else {\n\t\t\t\t// Look for a unquoted value\n\t\t\t\tvar unquotedValue = $tw.utils.parseTokenRegExp(source,pos,reUnquotedAttribute);\n\t\t\t\tif(unquotedValue) {\n\t\t\t\t\tpos = unquotedValue.end;\n\t\t\t\t\tnode.type = \"string\";\n\t\t\t\t\tnode.value = unquotedValue.match[1];\n\t\t\t\t} else {\n\t\t\t\t\t// Look for a macro invocation value\n\t\t\t\t\tvar macroInvocation = $tw.utils.parseMacroInvocation(source,pos);\n\t\t\t\t\tif(macroInvocation) {\n\t\t\t\t\t\tpos = macroInvocation.end;\n\t\t\t\t\t\tnode.type = \"macro\";\n\t\t\t\t\t\tnode.value = macroInvocation;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.type = \"string\";\n\t\t\t\t\t\tnode.value = \"true\";\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnode.type = \"string\";\n\t\tnode.value = \"true\";\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/parseutils.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/parsers/textparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/textparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe plain text parser processes blocks of source text into a degenerate parse tree consisting of a single text node\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar TextParser = function(type,text,options) {\n\tthis.tree = [{\n\t\ttype: \"codeblock\",\n\t\tattributes: {\n\t\t\tcode: {type: \"string\", value: text},\n\t\t\tlanguage: {type: \"string\", value: type}\n\t\t}\n\t}];\n};\n\nexports[\"text/plain\"] = TextParser;\nexports[\"text/x-tiddlywiki\"] = TextParser;\nexports[\"application/javascript\"] = TextParser;\nexports[\"application/json\"] = TextParser;\nexports[\"text/css\"] = TextParser;\nexports[\"application/x-tiddler-dictionary\"] = TextParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/textparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/videoparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/videoparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe video parser parses a video tiddler into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar AudioParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"video\",\n\t\t\tattributes: {\n\t\t\t\tcontrols: {type: \"string\", value: \"controls\"}\n\t\t\t}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t} else if(text) {\n\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"video/mp4\"] = AudioParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/videoparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/wikiparser/rules/codeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/codeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for code blocks. For example:\n\n```\n\t```\n\tThis text will not be //wikified//\n\t```\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"codeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match and get language if defined\n\tthis.matchRegExp = /```([\\w-]*)\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /(\\r?\\n```$)/mg;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Look for the end of the block\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the block\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\t// Return the $codeblock widget\n\treturn [{\n\t\t\ttype: \"codeblock\",\n\t\t\tattributes: {\n\t\t\t\t\tcode: {type: \"string\", value: text},\n\t\t\t\t\tlanguage: {type: \"string\", value: this.match[1]}\n\t\t\t}\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/codeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/codeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/codeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for code runs. For example:\n\n```\n\tThis is a `code run`.\n\tThis is another ``code run``\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"codeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(``?)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar reEnd = new RegExp(this.match[1], \"mg\");\n\t// Look for the end marker\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the text\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"code\",\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\ttext: text\n\t\t}]\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/codeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/commentblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/commentblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for HTML comments. For example:\n\n```\n<!-- This is a comment -->\n```\n\nNote that the syntax for comments is simplified to an opening \"<!--\" sequence and a closing \"-->\" sequence -- HTML itself implements a more complex format (see http://ostermiller.org/findhtmlcomment.html)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"commentblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\tthis.matchRegExp = /<!--/mg;\n\tthis.endMatchRegExp = /-->/mg;\n};\n\nexports.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\tif(this.match) {\n\t\tthis.endMatchRegExp.lastIndex = startPos + this.match[0].length;\n\t\tthis.endMatch = this.endMatchRegExp.exec(this.parser.source);\n\t\tif(this.endMatch) {\n\t\t\treturn this.match.index;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.endMatchRegExp.lastIndex;\n\t// Don't return any elements\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/commentblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/commentinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/commentinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for HTML comments. For example:\n\n```\n<!-- This is a comment -->\n```\n\nNote that the syntax for comments is simplified to an opening \"<!--\" sequence and a closing \"-->\" sequence -- HTML itself implements a more complex format (see http://ostermiller.org/findhtmlcomment.html)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"commentinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\tthis.matchRegExp = /<!--/mg;\n\tthis.endMatchRegExp = /-->/mg;\n};\n\nexports.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\tif(this.match) {\n\t\tthis.endMatchRegExp.lastIndex = startPos + this.match[0].length;\n\t\tthis.endMatch = this.endMatchRegExp.exec(this.parser.source);\n\t\tif(this.endMatch) {\n\t\t\treturn this.match.index;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.endMatchRegExp.lastIndex;\n\t// Don't return any elements\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/commentinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/dash.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/dash.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for dashes. For example:\n\n```\nThis is an en-dash: --\n\nThis is an em-dash: ---\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"dash\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /-{2,3}(?!-)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar dash = this.match[0].length === 2 ? \"&ndash;\" : \"&mdash;\";\n\treturn [{\n\t\ttype: \"entity\",\n\t\tentity: dash\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/dash.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/bold.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/bold.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - bold. For example:\n\n```\n\tThis is ''bold'' text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except bold \n\\rules only bold \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"bold\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /''/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/''/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"strong\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/bold.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/italic.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/italic.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - italic. For example:\n\n```\n\tThis is //italic// text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except italic\n\\rules only italic\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"italic\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\/\\//mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/\\/\\//mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"em\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/italic.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - strikethrough. For example:\n\n```\n\tThis is ~~strikethrough~~ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except strikethrough \n\\rules only strikethrough \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"strikethrough\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~~/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/~~/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"strike\",\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - subscript. For example:\n\n```\n\tThis is ,,subscript,, text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except subscript \n\\rules only subscript \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"subscript\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /,,/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/,,/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"sub\",\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - superscript. For example:\n\n```\n\tThis is ^^superscript^^ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except superscript \n\\rules only superscript \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"superscript\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\^\\^/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/\\^\\^/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"sup\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - underscore. For example:\n\n```\n\tThis is __underscore__ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except underscore \n\\rules only underscore\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"underscore\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /__/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/__/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"u\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/entity.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/entity.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for HTML entities. For example:\n\n```\n\tThis is a copyright symbol: &copy;\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"entity\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(&#?[a-zA-Z0-9]{2,8};)/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar entityString = this.match[1];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Return the entity\n\treturn [{type: \"entity\", entity: this.match[0]}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/entity.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/extlink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/extlink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for external links. For example:\n\n```\nAn external link: http://www.tiddlywiki.com/\n\nA suppressed external link: ~http://www.tiddlyspace.com/\n```\n\nExternal links can be suppressed by preceding them with `~`.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"extlink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~?(?:file|http|https|mailto|ftp|irc|news|data|skype):[^\\s<>{}\\[\\]`|\"\\\\^]+(?:\\/|\\b)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Create the link unless it is suppressed\n\tif(this.match[0].substr(0,1) === \"~\") {\n\t\treturn [{type: \"text\", text: this.match[0].substr(1)}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tattributes: {\n\t\t\t\thref: {type: \"string\", value: this.match[0]},\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t\ttarget: {type: \"string\", value: \"_blank\"},\n\t\t\t\trel: {type: \"string\", value: \"noopener noreferrer\"}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: this.match[0]\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/extlink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for block-level filtered transclusion. For example:\n\n```\n{{{ [tag[docs]] }}}\n{{{ [tag[docs]] |tooltip}}}\n{{{ [tag[docs]] ||TemplateTitle}}}\n{{{ [tag[docs]] |tooltip||TemplateTitle}}}\n{{{ [tag[docs]] }}width:40;height:50;}.class.class\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"filteredtranscludeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{\\{([^\\|]+?)(?:\\|([^\\|\\{\\}]+))?(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}([^\\}]*)\\}(?:\\.(\\S+))?(?:\\r?\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar filter = this.match[1],\n\t\ttooltip = this.match[2],\n\t\ttemplate = $tw.utils.trim(this.match[3]),\n\t\tstyle = this.match[4],\n\t\tclasses = this.match[5];\n\t// Return the list widget\n\tvar node = {\n\t\ttype: \"list\",\n\t\tattributes: {\n\t\t\tfilter: {type: \"string\", value: filter}\n\t\t},\n\t\tisBlock: true\n\t};\n\tif(tooltip) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: tooltip};\n\t}\n\tif(template) {\n\t\tnode.attributes.template = {type: \"string\", value: template};\n\t}\n\tif(style) {\n\t\tnode.attributes.style = {type: \"string\", value: style};\n\t}\n\tif(classes) {\n\t\tnode.attributes.itemClass = {type: \"string\", value: classes.split(\".\").join(\" \")};\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for inline filtered transclusion. For example:\n\n```\n{{{ [tag[docs]] }}}\n{{{ [tag[docs]] |tooltip}}}\n{{{ [tag[docs]] ||TemplateTitle}}}\n{{{ [tag[docs]] |tooltip||TemplateTitle}}}\n{{{ [tag[docs]] }}width:40;height:50;}.class.class\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"filteredtranscludeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{\\{([^\\|]+?)(?:\\|([^\\|\\{\\}]+))?(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}([^\\}]*)\\}(?:\\.(\\S+))?/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar filter = this.match[1],\n\t\ttooltip = this.match[2],\n\t\ttemplate = $tw.utils.trim(this.match[3]),\n\t\tstyle = this.match[4],\n\t\tclasses = this.match[5];\n\t// Return the list widget\n\tvar node = {\n\t\ttype: \"list\",\n\t\tattributes: {\n\t\t\tfilter: {type: \"string\", value: filter}\n\t\t}\n\t};\n\tif(tooltip) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: tooltip};\n\t}\n\tif(template) {\n\t\tnode.attributes.template = {type: \"string\", value: template};\n\t}\n\tif(style) {\n\t\tnode.attributes.style = {type: \"string\", value: style};\n\t}\n\tif(classes) {\n\t\tnode.attributes.itemClass = {type: \"string\", value: classes.split(\".\").join(\" \")};\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for marking areas with hard line breaks. For example:\n\n```\n\"\"\"\nThis is some text\nThat is set like\nIt is a Poem\nWhen it is\nClearly\nNot\n\"\"\"\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"hardlinebreaks\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\"\"\"(?:\\r?\\n)?/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /(\"\"\")|(\\r?\\n)/mg,\n\t\ttree = [],\n\t\tmatch;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tdo {\n\t\t// Parse the run up to the terminator\n\t\ttree.push.apply(tree,this.parser.parseInlineRun(reEnd,{eatTerminator: false}));\n\t\t// Redo the terminator match\n\t\treEnd.lastIndex = this.parser.pos;\n\t\tmatch = reEnd.exec(this.parser.source);\n\t\tif(match) {\n\t\t\tthis.parser.pos = reEnd.lastIndex;\n\t\t\t// Add a line break if the terminator was a line break\n\t\t\tif(match[2]) {\n\t\t\t\ttree.push({type: \"element\", tag: \"br\"});\n\t\t\t}\n\t\t}\n\t} while(match && !match[1]);\n\t// Return the nodes\n\treturn tree;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/heading.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/heading.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for headings\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"heading\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(!{1,6})/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar headingLevel = this.match[1].length;\n\t// Move past the !s\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse any classes, whitespace and then the heading itself\n\tvar classes = this.parser.parseClasses();\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tvar tree = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// Return the heading\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"h\" + headingLevel, \n\t\tattributes: {\n\t\t\t\"class\": {type: \"string\", value: classes.join(\" \")}\n\t\t},\n\t\tchildren: tree\n\t}];\n};\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/heading.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/horizrule.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/horizrule.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for rules. For example:\n\n```\n---\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"horizrule\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /-{3,}\\r?(?:\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\treturn [{type: \"element\", tag: \"hr\"}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/horizrule.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/html.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/html.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for HTML elements and widgets. For example:\n\n{{{\n<aside>\nThis is an HTML5 aside element\n</aside>\n\n<$slider target=\"MyTiddler\">\nThis is a widget invocation\n</$slider>\n\n}}}\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"html\";\nexports.types = {inline: true, block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextTag = this.findNextTag(this.parser.source,startPos,{\n\t\trequireLineBreak: this.is.block\n\t});\n\treturn this.nextTag ? this.nextTag.start : undefined;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Retrieve the most recent match so that recursive calls don't overwrite it\n\tvar tag = this.nextTag;\n\tthis.nextTag = null;\n\t// Advance the parser position to past the tag\n\tthis.parser.pos = tag.end;\n\t// Check for an immediately following double linebreak\n\tvar hasLineBreak = !tag.isSelfClosing && !!$tw.utils.parseTokenRegExp(this.parser.source,this.parser.pos,/([^\\S\\n\\r]*\\r?\\n(?:[^\\S\\n\\r]*\\r?\\n|$))/g);\n\t// Set whether we're in block mode\n\ttag.isBlock = this.is.block || hasLineBreak;\n\t// Parse the body if we need to\n\tif(!tag.isSelfClosing && $tw.config.htmlVoidElements.indexOf(tag.tag) === -1) {\n\t\t\tvar reEndString = \"</\" + $tw.utils.escapeRegExp(tag.tag) + \">\",\n\t\t\t\treEnd = new RegExp(\"(\" + reEndString + \")\",\"mg\");\n\t\tif(hasLineBreak) {\n\t\t\ttag.children = this.parser.parseBlocks(reEndString);\n\t\t} else {\n\t\t\ttag.children = this.parser.parseInlineRun(reEnd);\n\t\t}\n\t\treEnd.lastIndex = this.parser.pos;\n\t\tvar endMatch = reEnd.exec(this.parser.source);\n\t\tif(endMatch && endMatch.index === this.parser.pos) {\n\t\t\tthis.parser.pos = endMatch.index + endMatch[0].length;\n\t\t}\n\t}\n\t// Return the tag\n\treturn [tag];\n};\n\n/*\nLook for an HTML tag. 
Returns null if not found, otherwise returns {type: \"element\", name:, attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseTag = function(source,pos,options) {\n\toptions = options || {};\n\tvar token,\n\t\tnode = {\n\t\t\ttype: \"element\",\n\t\t\tstart: pos,\n\t\t\tattributes: {}\n\t\t};\n\t// Define our regexps\n\tvar reTagName = /([a-zA-Z0-9\\-\\$]+)/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a less than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\"<\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the tag name\n\ttoken = $tw.utils.parseTokenRegExp(source,pos,reTagName);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tnode.tag = token.match[1];\n\tif(node.tag.charAt(0) === \"$\") {\n\t\tnode.type = node.tag.substr(1);\n\t}\n\tpos = token.end;\n\t// Process attributes\n\tvar attribute = $tw.utils.parseAttribute(source,pos);\n\twhile(attribute) {\n\t\tnode.attributes[attribute.name] = attribute;\n\t\tpos = attribute.end;\n\t\t// Get the next attribute\n\t\tattribute = $tw.utils.parseAttribute(source,pos);\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a closing slash\n\ttoken = $tw.utils.parseTokenString(source,pos,\"/\");\n\tif(token) {\n\t\tpos = token.end;\n\t\tnode.isSelfClosing = true;\n\t}\n\t// Look for a greater than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\">\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Check for a required line break\n\tif(options.requireLineBreak) {\n\t\ttoken = $tw.utils.parseTokenRegExp(source,pos,/([^\\S\\n\\r]*\\r?\\n(?:[^\\S\\n\\r]*\\r?\\n|$))/g);\n\t\tif(!token) {\n\t\t\treturn null;\n\t\t}\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\nexports.findNextTag = function(source,pos,options) {\n\t// A regexp for finding candidate HTML tags\n\tvar reLookahead = /<([a-zA-Z\\-\\$]+)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a tag\n\t\tvar tag = this.parseTag(source,match.index,options);\n\t\t// Return success\n\t\tif(tag && this.isLegalTag(tag)) {\n\t\t\treturn tag;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\nexports.isLegalTag = function(tag) {\n\t// Widgets are always OK\n\tif(tag.type !== \"element\") {\n\t\treturn true;\n\t// If it's an HTML tag that starts with a dash then it's not legal\n\t} else if(tag.tag.charAt(0) === \"-\") {\n\t\treturn false;\n\t} else {\n\t\t// Otherwise it's OK\n\t\treturn true;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/html.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/image.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for embedding images. For example:\n\n```\n[img[http://tiddlywiki.com/fractalveg.jpg]]\n[img width=23 height=24 [http://tiddlywiki.com/fractalveg.jpg]]\n[img width={{!!width}} height={{!!height}} [http://tiddlywiki.com/fractalveg.jpg]]\n[img[Description of image|http://tiddlywiki.com/fractalveg.jpg]]\n[img[TiddlerTitle]]\n[img[Description of image|TiddlerTitle]]\n```\n\nGenerates the `<$image>` widget.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"image\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextImage = this.findNextImage(this.parser.source,startPos);\n\treturn this.nextImage ? this.nextImage.start : undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.nextImage.end;\n\tvar node = {\n\t\ttype: \"image\",\n\t\tattributes: this.nextImage.attributes\n\t};\n\treturn [node];\n};\n\n/*\nFind the next image from the current position\n*/\nexports.findNextImage = function(source,pos) {\n\t// A regexp for finding candidate HTML tags\n\tvar reLookahead = /(\\[img)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a tag\n\t\tvar tag = this.parseImage(source,match.index);\n\t\t// Return success\n\t\tif(tag) {\n\t\t\treturn tag;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\n/*\nLook for an image at the specified position. 
Returns null if not found, otherwise returns {type: \"image\", attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseImage = function(source,pos) {\n\tvar token,\n\t\tnode = {\n\t\t\ttype: \"image\",\n\t\t\tstart: pos,\n\t\t\tattributes: {}\n\t\t};\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[img`\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[img\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Process attributes\n\tif(source.charAt(pos) !== \"[\") {\n\t\tvar attribute = $tw.utils.parseAttribute(source,pos);\n\t\twhile(attribute) {\n\t\t\tnode.attributes[attribute.name] = attribute;\n\t\t\tpos = attribute.end;\n\t\t\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t\t\tif(source.charAt(pos) !== \"[\") {\n\t\t\t\t// Get the next attribute\n\t\t\t\tattribute = $tw.utils.parseAttribute(source,pos);\n\t\t\t} else {\n\t\t\t\tattribute = null;\n\t\t\t}\n\t\t}\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[` after the attributes\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Get the source up to the terminating `]]`\n\ttoken = $tw.utils.parseTokenRegExp(source,pos,/(?:([^|\\]]*?)\\|)?([^\\]]+?)\\]\\]/g);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\tif(token.match[1]) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: token.match[1].trim()};\n\t}\n\tnode.attributes.source = {type: \"string\", value: (token.match[2] || \"\").trim()};\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/image.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/list.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for lists. For example:\n\n```\n* This is an unordered list\n* It has two items\n\n# This is a numbered list\n## With a subitem\n# And a third item\n\n; This is a term that is being defined\n: This is the definition of that term\n```\n\nNote that lists can be nested arbitrarily:\n\n```\n#** One\n#* Two\n#** Three\n#**** Four\n#**# Five\n#**## Six\n## Seven\n### Eight\n## Nine\n```\n\nA CSS class can be applied to a list item as follows:\n\n```\n* List item one\n*.active List item two has the class `active`\n* List item three\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"list\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /([\\*#;:>]+)/mg;\n};\n\nvar listTypes = {\n\t\"*\": {listTag: \"ul\", itemTag: \"li\"},\n\t\"#\": {listTag: \"ol\", itemTag: \"li\"},\n\t\";\": {listTag: \"dl\", itemTag: \"dt\"},\n\t\":\": {listTag: \"dl\", itemTag: \"dd\"},\n\t\">\": {listTag: \"blockquote\", itemTag: \"p\"}\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Array of parse tree nodes for the previous row of the list\n\tvar listStack = [];\n\t// Cycle through the items in the list\n\twhile(true) {\n\t\t// Match the list marker\n\t\tvar reMatch = /([\\*#;:>]+)/mg;\n\t\treMatch.lastIndex = this.parser.pos;\n\t\tvar match = reMatch.exec(this.parser.source);\n\t\tif(!match || match.index !== this.parser.pos) {\n\t\t\tbreak;\n\t\t}\n\t\t// Check whether the list type of the top level matches\n\t\tvar listInfo = listTypes[match[0].charAt(0)];\n\t\tif(listStack.length > 0 && listStack[0].tag !== listInfo.listTag) {\n\t\t\tbreak;\n\t\t}\n\t\t// Move past the list marker\n\t\tthis.parser.pos = match.index + match[0].length;\n\t\t// Walk through the list markers for the current row\n\t\tfor(var t=0; t<match[0].length; t++) {\n\t\t\tlistInfo = listTypes[match[0].charAt(t)];\n\t\t\t// Remove any stacked up element if we can't re-use it because the list type doesn't match\n\t\t\tif(listStack.length > t && listStack[t].tag !== listInfo.listTag) {\n\t\t\t\tlistStack.splice(t,listStack.length - t);\n\t\t\t}\n\t\t\t// Construct the list element or reuse the previous one at this level\n\t\t\tif(listStack.length <= t) {\n\t\t\t\tvar listElement = {type: \"element\", tag: listInfo.listTag, children: [\n\t\t\t\t\t{type: \"element\", tag: listInfo.itemTag, children: []}\n\t\t\t\t]};\n\t\t\t\t// Link this list element into the last child item of the parent list item\n\t\t\t\tif(t) {\n\t\t\t\t\tvar prevListItem = listStack[t-1].children[listStack[t-1].children.length-1];\n\t\t\t\t\tprevListItem.children.push(listElement);\n\t\t\t\t}\n\t\t\t\t// Save this element in the stack\n\t\t\t\tlistStack[t] = listElement;\n\t\t\t} else if(t === (match[0].length - 1)) {\n\t\t\t\tlistStack[t].children.push({type: \"element\", tag: listInfo.itemTag, children: []});\n\t\t\t}\n\t\t}\n\t\tif(listStack.length > match[0].length) {\n\t\t\tlistStack.splice(match[0].length,listStack.length - match[0].length);\n\t\t}\n\t\t// Process the body of the list item into the last list item\n\t\tvar lastListChildren = listStack[listStack.length-1].children,\n\t\t\tlastListItem = lastListChildren[lastListChildren.length-1],\n\t\t\tclasses = 
this.parser.parseClasses();\n\t\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\t\tvar tree = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t\tlastListItem.children.push.apply(lastListItem.children,tree);\n\t\tif(classes.length > 0) {\n\t\t\t$tw.utils.addClassToParseTreeNode(lastListItem,classes.join(\" \"));\n\t\t}\n\t\t// Consume any whitespace following the list item\n\t\tthis.parser.skipWhitespace();\n\t}\n\t// Return the root element of the list\n\treturn [listStack[0]];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/list.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/macrocallblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrocallblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for block macro calls\n\n```\n<<name value value2>>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrocallblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /<<([^>\\s]+)(?:\\s*)((?:[^>]|(?:>(?!>)))*?)>>(?:\\r?\\n|$)/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar macroName = this.match[1],\n\t\tparamString = this.match[2];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar params = [],\n\t\treParam = /\\s*(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))/mg,\n\t\tparamMatch = reParam.exec(paramString);\n\twhile(paramMatch) {\n\t\t// Process this parameter\n\t\tvar paramInfo = {\n\t\t\tvalue: paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5] || paramMatch[6]\n\t\t};\n\t\tif(paramMatch[1]) {\n\t\t\tparamInfo.name = paramMatch[1];\n\t\t}\n\t\tparams.push(paramInfo);\n\t\t// Find the next match\n\t\tparamMatch = reParam.exec(paramString);\n\t}\n\treturn [{\n\t\ttype: \"macrocall\",\n\t\tname: macroName,\n\t\tparams: params,\n\t\tisBlock: true\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrocallblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/macrocallinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrocallinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for macro calls\n\n```\n<<name value value2>>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrocallinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /<<([^\\s>]+)\\s*([\\s\\S]*?)>>/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar macroName = this.match[1],\n\t\tparamString = this.match[2];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar params = [],\n\t\treParam = /\\s*(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))/mg,\n\t\tparamMatch = reParam.exec(paramString);\n\twhile(paramMatch) {\n\t\t// Process this parameter\n\t\tvar paramInfo = {\n\t\t\tvalue: paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5]|| paramMatch[6]\n\t\t};\n\t\tif(paramMatch[1]) {\n\t\t\tparamInfo.name = paramMatch[1];\n\t\t}\n\t\tparams.push(paramInfo);\n\t\t// Find the next match\n\t\tparamMatch = reParam.exec(paramString);\n\t}\n\treturn [{\n\t\ttype: \"macrocall\",\n\t\tname: macroName,\n\t\tparams: params\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrocallinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
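        "$:/editorial/demo/macro-param-parsing.js": {
            "text": "/*\\\ntitle: $:/editorial/demo/macro-param-parsing.js\ntype: application/javascript\n\nEditorial sketch, not part of the TiddlyWiki core: the tiddler title and the sample parameter string are invented for illustration, and no module-type is declared so the boot code will not execute this as a module. It shows how the parameter regexp shared by macrocallblock.js and macrocallinline.js above splits a macro parameter string into named and positional values.\n\n\\*/\n(function(){\n\n\"use strict\";\n\n// Regexp copied from the macrocall rules: an optional `name:` prefix, then a value that\n// may be triple-quoted, double-quoted, single-quoted, [[bracketed]] or a bare word\nvar reParam = /\\s*(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))/mg;\n\n// Hypothetical parameter string, as it would appear between << and >>\nvar paramString = \"alpha label:'two words' [[bracketed value]]\";\n\nvar params = [],\n\tparamMatch = reParam.exec(paramString);\nwhile(paramMatch) {\n\t// Group 1 is the optional name; groups 2-6 are the alternative value forms\n\tparams.push({\n\t\tname: paramMatch[1],\n\t\tvalue: paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5] || paramMatch[6]\n\t});\n\tparamMatch = reParam.exec(paramString);\n}\n\n// Logs: alpha (positional), label='two words', and 'bracketed value' (positional)\nconsole.log(params);\n\n})();\n",
            "title": "$:/editorial/demo/macro-param-parsing.js",
            "type": "application/javascript"
        },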
        "$:/core/modules/parsers/wikiparser/rules/macrodef.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrodef.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki pragma rule for macro definitions\n\n```\n\\define name(param:defaultvalue,param2:defaultvalue)\ndefinition text, including $param$ markers\n\\end\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrodef\";\nexports.types = {pragma: true};\n\n/*\nInstantiate parse rule\n*/\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\\\define\\s+([^(\\s]+)\\(\\s*([^)]*)\\)(\\s*\\r?\\n)?/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Move past the macro name and parameters\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse the parameters\n\tvar paramString = this.match[2],\n\t\tparams = [];\n\tif(paramString !== \"\") {\n\t\tvar reParam = /\\s*([A-Za-z0-9\\-_]+)(?:\\s*:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))?/mg,\n\t\t\tparamMatch = reParam.exec(paramString);\n\t\twhile(paramMatch) {\n\t\t\t// Save the parameter details\n\t\t\tvar paramInfo = {name: paramMatch[1]},\n\t\t\t\tdefaultValue = paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5] || paramMatch[6];\n\t\t\tif(defaultValue) {\n\t\t\t\tparamInfo[\"default\"] = defaultValue;\n\t\t\t}\n\t\t\tparams.push(paramInfo);\n\t\t\t// Look for the next parameter\n\t\t\tparamMatch = reParam.exec(paramString);\n\t\t}\n\t}\n\t// Is this a multiline definition?\n\tvar reEnd;\n\tif(this.match[3]) {\n\t\t// If so, the end of the body is marked with \\end\n\t\treEnd = /(\\r?\\n\\\\end[^\\S\\n\\r]*(?:$|\\r?\\n))/mg;\n\t} else {\n\t\t// Otherwise, the end of the definition is marked by the end of the line\n\t\treEnd = /(\\r?\\n)/mg;\n\t\t// Move past any whitespace\n\t\tthis.parser.pos = $tw.utils.skipWhiteSpace(this.parser.source,this.parser.pos);\n\t}\n\t// Find the end of the definition\n\treEnd.lastIndex = this.parser.pos;\n\tvar text,\n\t\tendMatch = reEnd.exec(this.parser.source);\n\tif(endMatch) {\n\t\ttext = this.parser.source.substring(this.parser.pos,endMatch.index);\n\t\tthis.parser.pos = endMatch.index + endMatch[0].length;\n\t} else {\n\t\t// We didn't find the end of the definition, so we'll make it blank\n\t\ttext = \"\";\n\t}\n\t// Save the macro definition\n\treturn [{\n\t\ttype: \"set\",\n\t\tattributes: {\n\t\t\tname: {type: \"string\", value: this.match[1]},\n\t\t\tvalue: {type: \"string\", value: text}\n\t\t},\n\t\tchildren: [],\n\t\tparams: params\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrodef.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/prettyextlink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/prettyextlink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for external links. For example:\n\n```\n[ext[http://tiddlywiki.com/fractalveg.jpg]]\n[ext[Tooltip|http://tiddlywiki.com/fractalveg.jpg]]\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"prettyextlink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextLink = this.findNextLink(this.parser.source,startPos);\n\treturn this.nextLink ? this.nextLink.start : undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.nextLink.end;\n\treturn [this.nextLink];\n};\n\n/*\nFind the next link from the current position\n*/\nexports.findNextLink = function(source,pos) {\n\t// A regexp for finding candidate links\n\tvar reLookahead = /(\\[ext\\[)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a link\n\t\tvar link = this.parseLink(source,match.index);\n\t\t// Return success\n\t\tif(link) {\n\t\t\treturn link;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\n/*\nLook for an link at the specified position. Returns null if not found, otherwise returns {type: \"element\", tag: \"a\", attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseLink = function(source,pos) {\n\tvar token,\n\t\ttextNode = {\n\t\t\ttype: \"text\"\n\t\t},\n\t\tnode = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tstart: pos,\n\t\t\tattributes: {\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t},\n\t\t\tchildren: [textNode]\n\t\t};\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[ext[`\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[ext[\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Look ahead for the terminating `]]`\n\tvar closePos = source.indexOf(\"]]\",pos);\n\tif(closePos === -1) {\n\t\treturn null;\n\t}\n\t// Look for a `|` separating the tooltip\n\tvar splitPos = source.indexOf(\"|\",pos);\n\tif(splitPos === -1 || splitPos > closePos) {\n\t\tsplitPos = null;\n\t}\n\t// Pull out the tooltip and URL\n\tvar tooltip, URL;\n\tif(splitPos) {\n\t\tURL = source.substring(splitPos + 1,closePos).trim();\n\t\ttextNode.text = source.substring(pos,splitPos).trim();\n\t} else {\n\t\tURL = source.substring(pos,closePos).trim();\n\t\ttextNode.text = URL;\n\t}\n\tnode.attributes.href = {type: \"string\", value: URL};\n\tnode.attributes.target = {type: \"string\", value: \"_blank\"};\n\tnode.attributes.rel = {type: \"string\", value: \"noopener noreferrer\"};\n\t// Update the end position\n\tnode.end = closePos + 2;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/prettyextlink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/prettylink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/prettylink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for pretty links. For example:\n\n```\n[[Introduction]]\n\n[[Link description|TiddlerTitle]]\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"prettylink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\[\\[(.*?)(?:\\|(.*?))?\\]\\]/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Process the link\n\tvar text = this.match[1],\n\t\tlink = this.match[2] || text;\n\tif($tw.utils.isLinkExternal(link)) {\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tattributes: {\n\t\t\t\thref: {type: \"string\", value: link},\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t\ttarget: {type: \"string\", value: \"_blank\"},\n\t\t\t\trel: {type: \"string\", value: \"noopener noreferrer\"}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: text\n\t\t\t}]\n\t\t}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"link\",\n\t\t\tattributes: {\n\t\t\t\tto: {type: \"string\", value: link}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: text\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/prettylink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/quoteblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/quoteblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for quote blocks. For example:\n\n```\n\t<<<.optionalClass(es) optional cited from\n\ta quote\n\t<<<\n\t\n\t<<<.optionalClass(es)\n\ta quote\n\t<<< optional cited from\n```\n\nQuotes can be quoted by putting more <s\n\n```\n\t<<<\n\tQuote Level 1\n\t\n\t<<<<\n\tQuoteLevel 2\n\t<<<<\n\t\n\t<<<\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"quoteblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(<<<+)/mg;\n};\n\nexports.parse = function() {\n\tvar classes = [\"tc-quote\"];\n\t// Get all the details of the match\n\tvar reEndString = \"^\" + this.match[1] + \"(?!<)\";\n\t// Move past the <s\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t\n\t// Parse any classes, whitespace and then the optional cite itself\n\tclasses.push.apply(classes, this.parser.parseClasses());\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tvar cite = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// before handling the cite, parse the body of the quote\n\tvar tree= this.parser.parseBlocks(reEndString);\n\t// If we got a cite, put it before the text\n\tif(cite.length > 0) {\n\t\ttree.unshift({\n\t\t\ttype: \"element\",\n\t\t\ttag: \"cite\",\n\t\t\tchildren: cite\n\t\t});\n\t}\n\t// Parse any optional cite\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tcite = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// If we got a cite, push it\n\tif(cite.length > 0) {\n\t\ttree.push({\n\t\t\ttype: \"element\",\n\t\t\ttag: \"cite\",\n\t\t\tchildren: cite\n\t\t});\n\t}\n\t// Return the blockquote element\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"blockquote\",\n\t\tattributes: {\n\t\t\tclass: { type: \"string\", value: classes.join(\" \") },\n\t\t},\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/quoteblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/rules.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/rules.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki pragma rule for rules specifications\n\n```\n\\rules except ruleone ruletwo rulethree\n\\rules only ruleone ruletwo rulethree\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"rules\";\nexports.types = {pragma: true};\n\n/*\nInstantiate parse rule\n*/\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\\\rules[^\\S\\n]/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Move past the pragma invocation\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse whitespace delimited tokens terminated by a line break\n\tvar reMatch = /[^\\S\\n]*(\\S+)|(\\r?\\n)/mg,\n\t\ttokens = [];\n\treMatch.lastIndex = this.parser.pos;\n\tvar match = reMatch.exec(this.parser.source);\n\twhile(match && match.index === this.parser.pos) {\n\t\tthis.parser.pos = reMatch.lastIndex;\n\t\t// Exit if we've got the line break\n\t\tif(match[2]) {\n\t\t\tbreak;\n\t\t}\n\t\t// Process the token\n\t\tif(match[1]) {\n\t\t\ttokens.push(match[1]);\n\t\t}\n\t\t// Match the next token\n\t\tmatch = reMatch.exec(this.parser.source);\n\t}\n\t// Process the tokens\n\tif(tokens.length > 0) {\n\t\tthis.parser.amendRules(tokens[0],tokens.slice(1));\n\t}\n\t// No parse tree nodes to return\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/rules.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/styleblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/styleblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for assigning styles and classes to paragraphs and other blocks. For example:\n\n```\n@@.myClass\n@@background-color:red;\nThis paragraph will have the CSS class `myClass`.\n\n* The `<ul>` around this list will also have the class `myClass`\n* List item 2\n\n@@\n```\n\nNote that classes and styles can be mixed subject to the rule that styles must precede classes. For example\n\n```\n@@.myFirstClass.mySecondClass\n@@width:100px;.myThirdClass\nThis is a paragraph\n@@\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"styleblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /@@((?:[^\\.\\r\\n\\s:]+:[^\\r\\n;]+;)+)?(?:\\.([^\\r\\n\\s]+))?\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEndString = \"^@@(?:\\\\r?\\\\n)?\";\n\tvar classes = [], styles = [];\n\tdo {\n\t\t// Get the class and style\n\t\tif(this.match[1]) {\n\t\t\tstyles.push(this.match[1]);\n\t\t}\n\t\tif(this.match[2]) {\n\t\t\tclasses.push(this.match[2].split(\".\").join(\" \"));\n\t\t}\n\t\t// Move past the match\n\t\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t\t// Look for another line of classes and styles\n\t\tthis.match = this.matchRegExp.exec(this.parser.source);\n\t} while(this.match && this.match.index === this.parser.pos);\n\t// Parse the body\n\tvar tree = this.parser.parseBlocks(reEndString);\n\tfor(var t=0; t<tree.length; t++) {\n\t\tif(classes.length > 0) {\n\t\t\t$tw.utils.addClassToParseTreeNode(tree[t],classes.join(\" \"));\n\t\t}\n\t\tif(styles.length > 0) {\n\t\t\t$tw.utils.addAttributeToParseTreeNode(tree[t],\"style\",styles.join(\"\"));\n\t\t}\n\t}\n\treturn tree;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/styleblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
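        "$:/editorial/demo/styleblock-regexp.js": {
            "text": "/*\\\ntitle: $:/editorial/demo/styleblock-regexp.js\ntype: application/javascript\n\nEditorial sketch, not part of the TiddlyWiki core: the tiddler title and the sample lines are invented for illustration, and no module-type is declared so the boot code will not execute this as a module. It shows what the opening regexp of styleblock.js above captures from each `@@` line: group 1 collects the `style:value;` run and group 2 the dot-separated class names, which parse() then accumulates over successive lines.\n\n\\*/\n(function(){\n\n\"use strict\";\n\n// Regexp copied from styleblock.js\nvar matchRegExp = /@@((?:[^\\.\\r\\n\\s:]+:[^\\r\\n;]+;)+)?(?:\\.([^\\r\\n\\s]+))?\\r?\\n/mg;\n\n// Two hypothetical opening lines: one carrying styles, one carrying classes\nvar sample = \"@@background-color:red;\\n@@.myFirstClass.mySecondClass\\n\";\n\nvar match = matchRegExp.exec(sample);\nwhile(match) {\n\t// First line logs styles \"background-color:red;\"; second logs classes \"myFirstClass mySecondClass\"\n\tconsole.log({\n\t\tstyles: match[1],\n\t\tclasses: match[2] ? match[2].split(\".\").join(\" \") : undefined\n\t});\n\tmatch = matchRegExp.exec(sample);\n}\n\n})();\n",
            "title": "$:/editorial/demo/styleblock-regexp.js",
            "type": "application/javascript"
        },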
        "$:/core/modules/parsers/wikiparser/rules/styleinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/styleinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for assigning styles and classes to inline runs. For example:\n\n```\n@@.myClass This is some text with a class@@\n@@background-color:red;This is some text with a background colour@@\n@@width:100px;.myClass This is some text with a class and a width@@\n```\n\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"styleinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /@@((?:[^\\.\\r\\n\\s:]+:[^\\r\\n;]+;)+)?(\\.(?:[^\\r\\n\\s]+)\\s+)?/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /@@/g;\n\t// Get the styles and class\n\tvar stylesString = this.match[1],\n\t\tclassString = this.match[2] ? this.match[2].split(\".\").join(\" \") : undefined;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse the run up to the terminator\n\tvar tree = this.parser.parseInlineRun(reEnd,{eatTerminator: true});\n\t// Return the classed span\n\tvar node = {\n\t\ttype: \"element\",\n\t\ttag: \"span\",\n\t\tattributes: {\n\t\t\t\"class\": {type: \"string\", value: \"tc-inline-style\"}\n\t\t},\n\t\tchildren: tree\n\t};\n\tif(classString) {\n\t\t$tw.utils.addClassToParseTreeNode(node,classString);\n\t}\n\tif(stylesString) {\n\t\t$tw.utils.addAttributeToParseTreeNode(node,\"style\",stylesString);\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/styleinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/syslink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/syslink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for system tiddler links.\nCan be suppressed preceding them with `~`.\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"syslink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~?\\$:\\/[a-zA-Z0-9/.\\-_]+/mg;\n};\n\nexports.parse = function() {\n\tvar match = this.match[0];\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Create the link unless it is suppressed\n\tif(match.substr(0,1) === \"~\") {\n\t\treturn [{type: \"text\", text: match.substr(1)}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"link\",\n\t\t\tattributes: {\n\t\t\t\tto: {type: \"string\", value: match}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\ttext: match\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/syslink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/table.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/table.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for tables.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"table\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\|(?:[^\\n]*)\\|(?:[fhck]?)\\r?(?:\\n|$)/mg;\n};\n\nvar processRow = function(prevColumns) {\n\tvar cellRegExp = /(?:\\|([^\\n\\|]*)\\|)|(\\|[fhck]?\\r?(?:\\n|$))/mg,\n\t\tcellTermRegExp = /((?:\\x20*)\\|)/mg,\n\t\ttree = [],\n\t\tcol = 0,\n\t\tcolSpanCount = 1,\n\t\tprevCell,\n\t\tvAlign;\n\t// Match a single cell\n\tcellRegExp.lastIndex = this.parser.pos;\n\tvar cellMatch = cellRegExp.exec(this.parser.source);\n\twhile(cellMatch && cellMatch.index === this.parser.pos) {\n\t\tif(cellMatch[1] === \"~\") {\n\t\t\t// Rowspan\n\t\t\tvar last = prevColumns[col];\n\t\t\tif(last) {\n\t\t\t\tlast.rowSpanCount++;\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"rowspan\",last.rowSpanCount);\n\t\t\t\tvAlign = $tw.utils.getAttributeValueFromParseTreeNode(last.element,\"valign\",\"center\");\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"valign\",vAlign);\n\t\t\t\tif(colSpanCount > 1) {\n\t\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"colspan\",colSpanCount);\n\t\t\t\t\tcolSpanCount = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[1] === \">\") {\n\t\t\t// Colspan\n\t\t\tcolSpanCount++;\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[1] === \"<\" && prevCell) {\n\t\t\tcolSpanCount = 1 + $tw.utils.getAttributeValueFromParseTreeNode(prevCell,\"colspan\",1);\n\t\t\t$tw.utils.addAttributeToParseTreeNode(prevCell,\"colspan\",colSpanCount);\n\t\t\tcolSpanCount = 1;\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[2]) {\n\t\t\t// End of row\n\t\t\tif(prevCell && colSpanCount > 1) {\n\t\t\t\tif(prevCell.attributes && prevCell.attributes && prevCell.attributes.colspan) {\n\t\t\t\t\t\tcolSpanCount += prevCell.attributes.colspan.value;\n\t\t\t\t} else {\n\t\t\t\t\tcolSpanCount -= 1;\n\t\t\t\t}\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(prevCell,\"colspan\",colSpanCount);\n\t\t\t}\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t\tbreak;\n\t\t} else {\n\t\t\t// For ordinary cells, step beyond the opening `|`\n\t\t\tthis.parser.pos++;\n\t\t\t// Look for a space at the start of the cell\n\t\t\tvar spaceLeft = false;\n\t\t\tvAlign = null;\n\t\t\tif(this.parser.source.substr(this.parser.pos).search(/^\\^([^\\^]|\\^\\^)/) === 0) {\n\t\t\t\tvAlign = \"top\";\n\t\t\t} else if(this.parser.source.substr(this.parser.pos).search(/^,([^,]|,,)/) === 0) {\n\t\t\t\tvAlign = \"bottom\";\n\t\t\t}\n\t\t\tif(vAlign) {\n\t\t\t\tthis.parser.pos++;\n\t\t\t}\n\t\t\tvar chr = this.parser.source.substr(this.parser.pos,1);\n\t\t\twhile(chr === \" \") {\n\t\t\t\tspaceLeft = true;\n\t\t\t\tthis.parser.pos++;\n\t\t\t\tchr = this.parser.source.substr(this.parser.pos,1);\n\t\t\t}\n\t\t\t// Check whether this is a heading cell\n\t\t\tvar cell;\n\t\t\tif(chr === \"!\") {\n\t\t\t\tthis.parser.pos++;\n\t\t\t\tcell = {type: \"element\", tag: \"th\", children: []};\n\t\t\t} else 
{\n\t\t\t\tcell = {type: \"element\", tag: \"td\", children: []};\n\t\t\t}\n\t\t\ttree.push(cell);\n\t\t\t// Record information about this cell\n\t\t\tprevCell = cell;\n\t\t\tprevColumns[col] = {rowSpanCount:1,element:cell};\n\t\t\t// Check for a colspan\n\t\t\tif(colSpanCount > 1) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"colspan\",colSpanCount);\n\t\t\t\tcolSpanCount = 1;\n\t\t\t}\n\t\t\t// Parse the cell\n\t\t\tcell.children = this.parser.parseInlineRun(cellTermRegExp,{eatTerminator: true});\n\t\t\t// Set the alignment for the cell\n\t\t\tif(vAlign) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"valign\",vAlign);\n\t\t\t}\n\t\t\tif(this.parser.source.substr(this.parser.pos - 2,1) === \" \") { // spaceRight\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"align\",spaceLeft ? \"center\" : \"left\");\n\t\t\t} else if(spaceLeft) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"align\",\"right\");\n\t\t\t}\n\t\t\t// Move back to the closing `|`\n\t\t\tthis.parser.pos--;\n\t\t}\n\t\tcol++;\n\t\tcellRegExp.lastIndex = this.parser.pos;\n\t\tcellMatch = cellRegExp.exec(this.parser.source);\n\t}\n\treturn tree;\n};\n\nexports.parse = function() {\n\tvar rowContainerTypes = {\"c\":\"caption\", \"h\":\"thead\", \"\":\"tbody\", \"f\":\"tfoot\"},\n\t\ttable = {type: \"element\", tag: \"table\", children: []},\n\t\trowRegExp = /^\\|([^\\n]*)\\|([fhck]?)\\r?(?:\\n|$)/mg,\n\t\trowTermRegExp = /(\\|(?:[fhck]?)\\r?(?:\\n|$))/mg,\n\t\tprevColumns = [],\n\t\tcurrRowType,\n\t\trowContainer,\n\t\trowCount = 0;\n\t// Match the row\n\trowRegExp.lastIndex = this.parser.pos;\n\tvar rowMatch = rowRegExp.exec(this.parser.source);\n\twhile(rowMatch && rowMatch.index === this.parser.pos) {\n\t\tvar rowType = rowMatch[2];\n\t\t// Check if it is a class assignment\n\t\tif(rowType === \"k\") {\n\t\t\t$tw.utils.addClassToParseTreeNode(table,rowMatch[1]);\n\t\t\tthis.parser.pos = rowMatch.index + rowMatch[0].length;\n\t\t} else {\n\t\t\t// Otherwise, create a new row if this one is of a different type\n\t\t\tif(rowType !== currRowType) {\n\t\t\t\trowContainer = {type: \"element\", tag: rowContainerTypes[rowType], children: []};\n\t\t\t\ttable.children.push(rowContainer);\n\t\t\t\tcurrRowType = rowType;\n\t\t\t}\n\t\t\t// Is this a caption row?\n\t\t\tif(currRowType === \"c\") {\n\t\t\t\t// If so, move past the opening `|` of the row\n\t\t\t\tthis.parser.pos++;\n\t\t\t\t// Move the caption to the first row if it isn't already\n\t\t\t\tif(table.children.length !== 1) {\n\t\t\t\t\ttable.children.pop(); // Take rowContainer out of the children array\n\t\t\t\t\ttable.children.splice(0,0,rowContainer); // Insert it at the bottom\t\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\t// Set the alignment - TODO: figure out why TW did this\n//\t\t\t\trowContainer.attributes.align = rowCount === 0 ? \"top\" : \"bottom\";\n\t\t\t\t// Parse the caption\n\t\t\t\trowContainer.children = this.parser.parseInlineRun(rowTermRegExp,{eatTerminator: true});\n\t\t\t} else {\n\t\t\t\t// Create the row\n\t\t\t\tvar theRow = {type: \"element\", tag: \"tr\", children: []};\n\t\t\t\t$tw.utils.addClassToParseTreeNode(theRow,rowCount%2 ? \"oddRow\" : \"evenRow\");\n\t\t\t\trowContainer.children.push(theRow);\n\t\t\t\t// Process the row\n\t\t\t\ttheRow.children = processRow.call(this,prevColumns);\n\t\t\t\tthis.parser.pos = rowMatch.index + rowMatch[0].length;\n\t\t\t\t// Increment the row count\n\t\t\t\trowCount++;\n\t\t\t}\n\t\t}\n\t\trowMatch = rowRegExp.exec(this.parser.source);\n\t}\n\treturn [table];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/table.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/transcludeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/transcludeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for block-level transclusion. For example:\n\n```\n{{MyTiddler}}\n{{MyTiddler||TemplateTitle}}\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"transcludeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{([^\\{\\}\\|]*)(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}(?:\\r?\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar template = $tw.utils.trim(this.match[2]),\n\t\ttextRef = $tw.utils.trim(this.match[1]);\n\t// Prepare the transclude widget\n\tvar transcludeNode = {\n\t\t\ttype: \"transclude\",\n\t\t\tattributes: {},\n\t\t\tisBlock: true\n\t\t};\n\t// Prepare the tiddler widget\n\tvar tr, targetTitle, targetField, targetIndex, tiddlerNode;\n\tif(textRef) {\n\t\ttr = $tw.utils.parseTextReference(textRef);\n\t\ttargetTitle = tr.title;\n\t\ttargetField = tr.field;\n\t\ttargetIndex = tr.index;\n\t\ttiddlerNode = {\n\t\t\ttype: \"tiddler\",\n\t\t\tattributes: {\n\t\t\t\ttiddler: {type: \"string\", value: targetTitle}\n\t\t\t},\n\t\t\tisBlock: true,\n\t\t\tchildren: [transcludeNode]\n\t\t};\n\t}\n\tif(template) {\n\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: template};\n\t\tif(textRef) {\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t} else {\n\t\tif(textRef) {\n\t\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: targetTitle};\n\t\t\tif(targetField) {\n\t\t\t\ttranscludeNode.attributes.field = {type: \"string\", value: targetField};\n\t\t\t}\n\t\t\tif(targetIndex) {\n\t\t\t\ttranscludeNode.attributes.index = {type: \"string\", value: targetIndex};\n\t\t\t}\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/transcludeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/transcludeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/transcludeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for inline-level transclusion. For example:\n\n```\n{{MyTiddler}}\n{{MyTiddler||TemplateTitle}}\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"transcludeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{([^\\{\\}\\|]*)(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar template = $tw.utils.trim(this.match[2]),\n\t\ttextRef = $tw.utils.trim(this.match[1]);\n\t// Prepare the transclude widget\n\tvar transcludeNode = {\n\t\t\ttype: \"transclude\",\n\t\t\tattributes: {}\n\t\t};\n\t// Prepare the tiddler widget\n\tvar tr, targetTitle, targetField, targetIndex, tiddlerNode;\n\tif(textRef) {\n\t\ttr = $tw.utils.parseTextReference(textRef);\n\t\ttargetTitle = tr.title;\n\t\ttargetField = tr.field;\n\t\ttargetIndex = tr.index;\n\t\ttiddlerNode = {\n\t\t\ttype: \"tiddler\",\n\t\t\tattributes: {\n\t\t\t\ttiddler: {type: \"string\", value: targetTitle}\n\t\t\t},\n\t\t\tchildren: [transcludeNode]\n\t\t};\n\t}\n\tif(template) {\n\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: template};\n\t\tif(textRef) {\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t} else {\n\t\tif(textRef) {\n\t\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: targetTitle};\n\t\t\tif(targetField) {\n\t\t\t\ttranscludeNode.attributes.field = {type: \"string\", value: targetField};\n\t\t\t}\n\t\t\tif(targetIndex) {\n\t\t\t\ttranscludeNode.attributes.index = {type: \"string\", value: targetIndex};\n\t\t\t}\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/transcludeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/typedblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/typedblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for typed blocks. For example:\n\n```\n$$$.js\nThis will be rendered as JavaScript\n$$$\n\n$$$.svg\n<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"150\" height=\"100\">\n  <circle cx=\"100\" cy=\"50\" r=\"40\" stroke=\"black\" stroke-width=\"2\" fill=\"red\" />\n</svg>\n$$$\n\n$$$text/vnd.tiddlywiki>text/html\nThis will be rendered as an //HTML representation// of WikiText\n$$$\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.name = \"typedblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\$\\$\\$([^ >\\r\\n]*)(?: *> *([^ \\r\\n]+))?\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /\\r?\\n\\$\\$\\$\\r?(?:\\n|$)/mg;\n\t// Save the type\n\tvar parseType = this.match[1],\n\t\trenderType = this.match[2];\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Look for the end of the block\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the block\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\t// Parse the block according to the specified type\n\tvar parser = this.parser.wiki.parseText(parseType,text,{defaultType: \"text/plain\"});\n\t// If there's no render type, just return the parse tree\n\tif(!renderType) {\n\t\treturn parser.tree;\n\t} else {\n\t\t// Otherwise, render to the rendertype and return in a <PRE> tag\n\t\tvar widgetNode = this.parser.wiki.makeWidget(parser),\n\t\t\tcontainer = $tw.fakeDocument.createElement(\"div\");\n\t\twidgetNode.render(container,null);\n\t\ttext = renderType === \"text/html\" ? container.innerHTML : container.textContent;\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"pre\",\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\ttext: text\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/typedblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/wikilink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/wikilink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for wiki links. For example:\n\n```\nAWikiLink\nAnotherLink\n~SuppressedLink\n```\n\nPrecede a camel case word with `~` to prevent it from being recognised as a link.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"wikilink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = new RegExp($tw.config.textPrimitives.unWikiLink + \"?\" + $tw.config.textPrimitives.wikiLink,\"mg\");\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get the details of the match\n\tvar linkText = this.match[0];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// If the link starts with the unwikilink character then just output it as plain text\n\tif(linkText.substr(0,1) === $tw.config.textPrimitives.unWikiLink) {\n\t\treturn [{type: \"text\", text: linkText.substr(1)}];\n\t}\n\t// If the link has been preceded with a blocked letter then don't treat it as a link\n\tif(this.match.index > 0) {\n\t\tvar preRegExp = new RegExp($tw.config.textPrimitives.blockPrefixLetters,\"mg\");\n\t\tpreRegExp.lastIndex = this.match.index-1;\n\t\tvar preMatch = preRegExp.exec(this.parser.source);\n\t\tif(preMatch && preMatch.index === this.match.index-1) {\n\t\t\treturn [{type: \"text\", text: linkText}];\n\t\t}\n\t}\n\treturn [{\n\t\ttype: \"link\",\n\t\tattributes: {\n\t\t\tto: {type: \"string\", value: linkText}\n\t\t},\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\ttext: linkText\n\t\t}]\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/wikilink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/wikiparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/wikiparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe wiki text parser processes blocks of source text into a parse tree.\n\nThe parse tree is made up of nested arrays of these JavaScript objects:\n\n\t{type: \"element\", tag: <string>, attributes: {}, children: []} - an HTML element\n\t{type: \"text\", text: <string>} - a text node\n\t{type: \"entity\", value: <string>} - an entity\n\t{type: \"raw\", html: <string>} - raw HTML\n\nAttributes are stored as hashmaps of the following objects:\n\n\t{type: \"string\", value: <string>} - literal string\n\t{type: \"indirect\", textReference: <textReference>} - indirect through a text reference\n\t{type: \"macro\", macro: <TBD>} - indirect through a macro invocation\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar WikiParser = function(type,text,options) {\n\tthis.wiki = options.wiki;\n\tvar self = this;\n\t// Check for an externally linked tiddler\n\tif($tw.browser && (text || \"\") === \"\" && options._canonical_uri) {\n\t\tthis.loadRemoteTiddler(options._canonical_uri);\n\t\ttext = $tw.language.getRawString(\"LazyLoadingWarning\");\n\t}\n\t// Initialise the classes if we don't have them already\n\tif(!this.pragmaRuleClasses) {\n\t\tWikiParser.prototype.pragmaRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"pragma\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.pragmaRuleClasses,\"$:/config/WikiParserRules/Pragmas/\");\n\t}\n\tif(!this.blockRuleClasses) {\n\t\tWikiParser.prototype.blockRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"block\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.blockRuleClasses,\"$:/config/WikiParserRules/Block/\");\n\t}\n\tif(!this.inlineRuleClasses) {\n\t\tWikiParser.prototype.inlineRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"inline\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.inlineRuleClasses,\"$:/config/WikiParserRules/Inline/\");\n\t}\n\t// Save the parse text\n\tthis.type = type || \"text/vnd.tiddlywiki\";\n\tthis.source = text || \"\";\n\tthis.sourceLength = this.source.length;\n\t// Set current parse position\n\tthis.pos = 0;\n\t// Instantiate the pragma parse rules\n\tthis.pragmaRules = this.instantiateRules(this.pragmaRuleClasses,\"pragma\",0);\n\t// Instantiate the parser block and inline rules\n\tthis.blockRules = this.instantiateRules(this.blockRuleClasses,\"block\",0);\n\tthis.inlineRules = this.instantiateRules(this.inlineRuleClasses,\"inline\",0);\n\t// Parse any pragmas\n\tthis.tree = [];\n\tvar topBranch = this.parsePragmas();\n\t// Parse the text into inline runs or blocks\n\tif(options.parseAsInline) {\n\t\ttopBranch.push.apply(topBranch,this.parseInlineRun());\n\t} else {\n\t\ttopBranch.push.apply(topBranch,this.parseBlocks());\n\t}\n\t// Return the parse tree\n};\n\n/*\n*/\nWikiParser.prototype.loadRemoteTiddler = function(url) {\n\tvar self = this;\n\t$tw.utils.httpRequest({\n\t\turl: url,\n\t\ttype: \"GET\",\n\t\tcallback: function(err,data) {\n\t\t\tif(!err) {\n\t\t\t\tvar tiddlers = self.wiki.deserializeTiddlers(\".tid\",data,self.wiki.getCreationFields());\n\t\t\t\t$tw.utils.each(tiddlers,function(tiddler) {\n\t\t\t\t\ttiddler[\"_canonical_uri\"] = url;\n\t\t\t\t});\n\t\t\t\tif(tiddlers) {\n\t\t\t\t\tself.wiki.addTiddlers(tiddlers);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\n*/\nWikiParser.prototype.setupRules = function(proto,configPrefix) 
{\n\tvar self = this;\n\tif(!$tw.safemode) {\n\t\t$tw.utils.each(proto,function(object,name) {\n\t\t\tif(self.wiki.getTiddlerText(configPrefix + name,\"enable\") !== \"enable\") {\n\t\t\t\tdelete proto[name];\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nInstantiate an array of parse rules\n*/\nWikiParser.prototype.instantiateRules = function(classes,type,startPos) {\n\tvar rulesInfo = [],\n\t\tself = this;\n\t$tw.utils.each(classes,function(RuleClass) {\n\t\t// Instantiate the rule\n\t\tvar rule = new RuleClass(self);\n\t\trule.is = {};\n\t\trule.is[type] = true;\n\t\trule.init(self);\n\t\tvar matchIndex = rule.findNextMatch(startPos);\n\t\tif(matchIndex !== undefined) {\n\t\t\trulesInfo.push({\n\t\t\t\trule: rule,\n\t\t\t\tmatchIndex: matchIndex\n\t\t\t});\n\t\t}\n\t});\n\treturn rulesInfo;\n};\n\n/*\nSkip any whitespace at the current position. Options are:\n\ttreatNewlinesAsNonWhitespace: true if newlines are NOT to be treated as whitespace\n*/\nWikiParser.prototype.skipWhitespace = function(options) {\n\toptions = options || {};\n\tvar whitespaceRegExp = options.treatNewlinesAsNonWhitespace ? /([^\\S\\n]+)/mg : /(\\s+)/mg;\n\twhitespaceRegExp.lastIndex = this.pos;\n\tvar whitespaceMatch = whitespaceRegExp.exec(this.source);\n\tif(whitespaceMatch && whitespaceMatch.index === this.pos) {\n\t\tthis.pos = whitespaceRegExp.lastIndex;\n\t}\n};\n\n/*\nGet the next match out of an array of parse rule instances\n*/\nWikiParser.prototype.findNextMatch = function(rules,startPos) {\n\t// Find the best matching rule by finding the closest match position\n\tvar matchingRule,\n\t\tmatchingRulePos = this.sourceLength;\n\t// Step through each rule\n\tfor(var t=0; t<rules.length; t++) {\n\t\tvar ruleInfo = rules[t];\n\t\t// Ask the rule to get the next match if we've moved past the current one\n\t\tif(ruleInfo.matchIndex !== undefined  && ruleInfo.matchIndex < startPos) {\n\t\t\truleInfo.matchIndex = ruleInfo.rule.findNextMatch(startPos);\n\t\t}\n\t\t// Adopt this match if it's closer than the current best match\n\t\tif(ruleInfo.matchIndex !== undefined && ruleInfo.matchIndex <= matchingRulePos) {\n\t\t\tmatchingRule = ruleInfo;\n\t\t\tmatchingRulePos = ruleInfo.matchIndex;\n\t\t}\n\t}\n\treturn matchingRule;\n};\n\n/*\nParse any pragmas at the beginning of a block of parse text\n*/\nWikiParser.prototype.parsePragmas = function() {\n\tvar currentTreeBranch = this.tree;\n\twhile(true) {\n\t\t// Skip whitespace\n\t\tthis.skipWhitespace();\n\t\t// Check for the end of the text\n\t\tif(this.pos >= this.sourceLength) {\n\t\t\tbreak;\n\t\t}\n\t\t// Check if we've arrived at a pragma rule match\n\t\tvar nextMatch = this.findNextMatch(this.pragmaRules,this.pos);\n\t\t// If not, just exit\n\t\tif(!nextMatch || nextMatch.matchIndex !== this.pos) {\n\t\t\tbreak;\n\t\t}\n\t\t// Process the pragma rule\n\t\tvar subTree = nextMatch.rule.parse();\n\t\tif(subTree.length > 0) {\n\t\t\t// Quick hack; we only cope with a single parse tree node being returned, which is true at the moment\n\t\t\tcurrentTreeBranch.push.apply(currentTreeBranch,subTree);\n\t\t\tsubTree[0].children = [];\n\t\t\tcurrentTreeBranch = subTree[0].children;\n\t\t}\n\t}\n\treturn currentTreeBranch;\n};\n\n/*\nParse a block from the current position\n\tterminatorRegExpString: optional regular expression string that identifies the end of plain paragraphs. Must not include capturing parenthesis\n*/\nWikiParser.prototype.parseBlock = function(terminatorRegExpString) {\n\tvar terminatorRegExp = terminatorRegExpString ? 
new RegExp(\"(\" + terminatorRegExpString + \"|\\\\r?\\\\n\\\\r?\\\\n)\",\"mg\") : /(\\r?\\n\\r?\\n)/mg;\n\tthis.skipWhitespace();\n\tif(this.pos >= this.sourceLength) {\n\t\treturn [];\n\t}\n\t// Look for a block rule that applies at the current position\n\tvar nextMatch = this.findNextMatch(this.blockRules,this.pos);\n\tif(nextMatch && nextMatch.matchIndex === this.pos) {\n\t\treturn nextMatch.rule.parse();\n\t}\n\t// Treat it as a paragraph if we didn't find a block rule\n\treturn [{type: \"element\", tag: \"p\", children: this.parseInlineRun(terminatorRegExp)}];\n};\n\n/*\nParse a series of blocks of text until a terminating regexp is encountered or the end of the text\n\tterminatorRegExpString: terminating regular expression\n*/\nWikiParser.prototype.parseBlocks = function(terminatorRegExpString) {\n\tif(terminatorRegExpString) {\n\t\treturn this.parseBlocksTerminated(terminatorRegExpString);\n\t} else {\n\t\treturn this.parseBlocksUnterminated();\n\t}\n};\n\n/*\nParse a block from the current position to the end of the text\n*/\nWikiParser.prototype.parseBlocksUnterminated = function() {\n\tvar tree = [];\n\twhile(this.pos < this.sourceLength) {\n\t\ttree.push.apply(tree,this.parseBlock());\n\t}\n\treturn tree;\n};\n\n/*\nParse blocks of text until a terminating regexp is encountered\n*/\nWikiParser.prototype.parseBlocksTerminated = function(terminatorRegExpString) {\n\tvar terminatorRegExp = new RegExp(\"(\" + terminatorRegExpString + \")\",\"mg\"),\n\t\ttree = [];\n\t// Skip any whitespace\n\tthis.skipWhitespace();\n\t//  Check if we've got the end marker\n\tterminatorRegExp.lastIndex = this.pos;\n\tvar match = terminatorRegExp.exec(this.source);\n\t// Parse the text into blocks\n\twhile(this.pos < this.sourceLength && !(match && match.index === this.pos)) {\n\t\tvar blocks = this.parseBlock(terminatorRegExpString);\n\t\ttree.push.apply(tree,blocks);\n\t\t// Skip any whitespace\n\t\tthis.skipWhitespace();\n\t\t//  Check if we've got the end marker\n\t\tterminatorRegExp.lastIndex = this.pos;\n\t\tmatch = terminatorRegExp.exec(this.source);\n\t}\n\tif(match && match.index === this.pos) {\n\t\tthis.pos = match.index + match[0].length;\n\t}\n\treturn tree;\n};\n\n/*\nParse a run of text at the current position\n\tterminatorRegExp: a regexp at which to stop the run\n\toptions: see below\nOptions available:\n\teatTerminator: move the parse position past any encountered terminator (default false)\n*/\nWikiParser.prototype.parseInlineRun = function(terminatorRegExp,options) {\n\tif(terminatorRegExp) {\n\t\treturn this.parseInlineRunTerminated(terminatorRegExp,options);\n\t} else {\n\t\treturn this.parseInlineRunUnterminated(options);\n\t}\n};\n\nWikiParser.prototype.parseInlineRunUnterminated = function(options) {\n\tvar tree = [];\n\t// Find the next occurrence of an inline rule\n\tvar nextMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t// Loop around the matches until we've reached the end of the text\n\twhile(this.pos < this.sourceLength && nextMatch) {\n\t\t// Process the text preceding the run rule\n\t\tif(nextMatch.matchIndex > this.pos) {\n\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,nextMatch.matchIndex)});\n\t\t\tthis.pos = nextMatch.matchIndex;\n\t\t}\n\t\t// Process the run rule\n\t\ttree.push.apply(tree,nextMatch.rule.parse());\n\t\t// Look for the next run rule\n\t\tnextMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t}\n\t// Process the remaining text\n\tif(this.pos < this.sourceLength) {\n\t\ttree.push({type: \"text\", text: 
this.source.substr(this.pos)});\n\t}\n\tthis.pos = this.sourceLength;\n\treturn tree;\n};\n\nWikiParser.prototype.parseInlineRunTerminated = function(terminatorRegExp,options) {\n\toptions = options || {};\n\tvar tree = [];\n\t// Find the next occurrence of the terminator\n\tterminatorRegExp.lastIndex = this.pos;\n\tvar terminatorMatch = terminatorRegExp.exec(this.source);\n\t// Find the next occurrence of a inlinerule\n\tvar inlineRuleMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t// Loop around until we've reached the end of the text\n\twhile(this.pos < this.sourceLength && (terminatorMatch || inlineRuleMatch)) {\n\t\t// Return if we've found the terminator, and it precedes any inline rule match\n\t\tif(terminatorMatch) {\n\t\t\tif(!inlineRuleMatch || inlineRuleMatch.matchIndex >= terminatorMatch.index) {\n\t\t\t\tif(terminatorMatch.index > this.pos) {\n\t\t\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,terminatorMatch.index)});\n\t\t\t\t}\n\t\t\t\tthis.pos = terminatorMatch.index;\n\t\t\t\tif(options.eatTerminator) {\n\t\t\t\t\tthis.pos += terminatorMatch[0].length;\n\t\t\t\t}\n\t\t\t\treturn tree;\n\t\t\t}\n\t\t}\n\t\t// Process any inline rule, along with the text preceding it\n\t\tif(inlineRuleMatch) {\n\t\t\t// Preceding text\n\t\t\tif(inlineRuleMatch.matchIndex > this.pos) {\n\t\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,inlineRuleMatch.matchIndex)});\n\t\t\t\tthis.pos = inlineRuleMatch.matchIndex;\n\t\t\t}\n\t\t\t// Process the inline rule\n\t\t\ttree.push.apply(tree,inlineRuleMatch.rule.parse());\n\t\t\t// Look for the next inline rule\n\t\t\tinlineRuleMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t\t\t// Look for the next terminator match\n\t\t\tterminatorRegExp.lastIndex = this.pos;\n\t\t\tterminatorMatch = terminatorRegExp.exec(this.source);\n\t\t}\n\t}\n\t// Process the remaining text\n\tif(this.pos < this.sourceLength) {\n\t\ttree.push({type: \"text\", text: this.source.substr(this.pos)});\n\t}\n\tthis.pos = this.sourceLength;\n\treturn tree;\n};\n\n/*\nParse zero or more class specifiers `.classname`\n*/\nWikiParser.prototype.parseClasses = function() {\n\tvar classRegExp = /\\.([^\\s\\.]+)/mg,\n\t\tclassNames = [];\n\tclassRegExp.lastIndex = this.pos;\n\tvar match = classRegExp.exec(this.source);\n\twhile(match && match.index === this.pos) {\n\t\tthis.pos = match.index + match[0].length;\n\t\tclassNames.push(match[1]);\n\t\tmatch = classRegExp.exec(this.source);\n\t}\n\treturn classNames;\n};\n\n/*\nAmend the rules used by this instance of the parser\n\ttype: `only` keeps just the named rules, `except` keeps all but the named rules\n\tnames: array of rule names\n*/\nWikiParser.prototype.amendRules = function(type,names) {\n\tnames = names || [];\n\t// Define the filter function\n\tvar keepFilter;\n\tif(type === \"only\") {\n\t\tkeepFilter = function(name) {\n\t\t\treturn names.indexOf(name) !== -1;\n\t\t};\n\t} else if(type === \"except\") {\n\t\tkeepFilter = function(name) {\n\t\t\treturn names.indexOf(name) === -1;\n\t\t};\n\t} else {\n\t\treturn;\n\t}\n\t// Define a function to process each of our rule arrays\n\tvar processRuleArray = function(ruleArray) {\n\t\tfor(var t=ruleArray.length-1; t>=0; t--) {\n\t\t\tif(!keepFilter(ruleArray[t].rule.name)) {\n\t\t\t\truleArray.splice(t,1);\n\t\t\t}\n\t\t}\n\t};\n\t// Process each rule array\n\tprocessRuleArray(this.pragmaRules);\n\tprocessRuleArray(this.blockRules);\n\tprocessRuleArray(this.inlineRules);\n};\n\nexports[\"text/vnd.tiddlywiki\"] = 
WikiParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/wikiparser/wikiparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/wikiparser/rules/wikirulebase.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/wikirulebase.js\ntype: application/javascript\nmodule-type: global\n\nBase class for wiki parser rules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nThis constructor is always overridden with a blank constructor, and so shouldn't be used\n*/\nvar WikiRuleBase = function() {\n};\n\n/*\nTo be overridden by individual rules\n*/\nWikiRuleBase.prototype.init = function(parser) {\n\tthis.parser = parser;\n};\n\n/*\nDefault implementation of findNextMatch uses RegExp matching\n*/\nWikiRuleBase.prototype.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\treturn this.match ? this.match.index : undefined;\n};\n\nexports.WikiRuleBase = WikiRuleBase;\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/wikirulebase.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/pluginswitcher.js": {
            "text": "/*\\\ntitle: $:/core/modules/pluginswitcher.js\ntype: application/javascript\nmodule-type: global\n\nManages switching plugins for themes and languages.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\noptions:\nwiki: wiki store to be used\npluginType: type of plugin to be switched\ncontrollerTitle: title of tiddler used to control switching of this resource\ndefaultPlugins: array of default plugins to be used if nominated plugin isn't found\n*/\nfunction PluginSwitcher(options) {\n\tthis.wiki = options.wiki;\n\tthis.pluginType = options.pluginType;\n\tthis.controllerTitle = options.controllerTitle;\n\tthis.defaultPlugins = options.defaultPlugins || [];\n\t// Switch to the current plugin\n\tthis.switchPlugins();\n\t// Listen for changes to the selected plugin\n\tvar self = this;\n\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,self.controllerTitle)) {\n\t\t\tself.switchPlugins();\n\t\t}\n\t});\n}\n\nPluginSwitcher.prototype.switchPlugins = function() {\n\t// Get the name of the current theme\n\tvar selectedPluginTitle = this.wiki.getTiddlerText(this.controllerTitle);\n\t// If it doesn't exist, then fallback to one of the default themes\n\tvar index = 0;\n\twhile(!this.wiki.getTiddler(selectedPluginTitle) && index < this.defaultPlugins.length) {\n\t\tselectedPluginTitle = this.defaultPlugins[index++];\n\t}\n\t// Accumulate the titles of the plugins that we need to load\n\tvar plugins = [],\n\t\tself = this,\n\t\taccumulatePlugin = function(title) {\n\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\tif(tiddler && tiddler.isPlugin() && plugins.indexOf(title) === -1) {\n\t\t\t\tplugins.push(title);\n\t\t\t\tvar pluginInfo = JSON.parse(self.wiki.getTiddlerText(title)),\n\t\t\t\t\tdependents = $tw.utils.parseStringArray(tiddler.fields.dependents || \"\");\n\t\t\t\t$tw.utils.each(dependents,function(title) {\n\t\t\t\t\taccumulatePlugin(title);\n\t\t\t\t});\n\t\t\t}\n\t\t};\n\taccumulatePlugin(selectedPluginTitle);\n\t// Unregister any existing theme tiddlers\n\tvar unregisteredTiddlers = $tw.wiki.unregisterPluginTiddlers(this.pluginType);\n\t// Register any new theme tiddlers\n\tvar registeredTiddlers = $tw.wiki.registerPluginTiddlers(this.pluginType,plugins);\n\t// Unpack the current theme tiddlers\n\t$tw.wiki.unpackPluginTiddlers();\n};\n\nexports.PluginSwitcher = PluginSwitcher;\n\n})();\n",
            "title": "$:/core/modules/pluginswitcher.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/saver-handler.js": {
            "text": "/*\\\ntitle: $:/core/modules/saver-handler.js\ntype: application/javascript\nmodule-type: global\n\nThe saver handler tracks changes to the store and handles saving the entire wiki via saver modules.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInstantiate the saver handler with the following options:\nwiki: wiki to be synced\ndirtyTracking: true if dirty tracking should be performed\n*/\nfunction SaverHandler(options) {\n\tvar self = this;\n\tthis.wiki = options.wiki;\n\tthis.dirtyTracking = options.dirtyTracking;\n\tthis.pendingAutoSave = false;\n\t// Make a logger\n\tthis.logger = new $tw.utils.Logger(\"saver-handler\");\n\t// Initialise our savers\n\tif($tw.browser) {\n\t\tthis.initSavers();\n\t}\n\t// Only do dirty tracking if required\n\tif($tw.browser && this.dirtyTracking) {\n\t\t// Compile the dirty tiddler filter\n\t\tthis.filterFn = this.wiki.compileFilter(this.wiki.getTiddlerText(this.titleSyncFilter));\n\t\t// Count of changes that have not yet been saved\n\t\tthis.numChanges = 0;\n\t\t// Listen out for changes to tiddlers\n\t\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\t\t// Filter the changes so that we only count changes to tiddlers that we care about\n\t\t\tvar filteredChanges = self.filterFn.call(self.wiki,function(callback) {\n\t\t\t\t$tw.utils.each(changes,function(change,title) {\n\t\t\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\t\t\tcallback(tiddler,title);\n\t\t\t\t});\n\t\t\t});\n\t\t\t// Adjust the number of changes\n\t\t\tself.numChanges += filteredChanges.length;\n\t\t\tself.updateDirtyStatus();\n\t\t\t// Do any autosave if one is pending and there's no more change events\n\t\t\tif(self.pendingAutoSave && self.wiki.getSizeOfTiddlerEventQueue() === 0) {\n\t\t\t\t// Check if we're dirty\n\t\t\t\tif(self.numChanges > 0) {\n\t\t\t\t\tself.saveWiki({\n\t\t\t\t\t\tmethod: \"autosave\",\n\t\t\t\t\t\tdownloadType: \"text/plain\"\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tself.pendingAutoSave = false;\n\t\t\t}\n\t\t});\n\t\t// Listen for the autosave event\n\t\t$tw.rootWidget.addEventListener(\"tm-auto-save-wiki\",function(event) {\n\t\t\t// Do the autosave unless there are outstanding tiddler change events\n\t\t\tif(self.wiki.getSizeOfTiddlerEventQueue() === 0) {\n\t\t\t\t// Check if we're dirty\n\t\t\t\tif(self.numChanges > 0) {\n\t\t\t\t\tself.saveWiki({\n\t\t\t\t\t\tmethod: \"autosave\",\n\t\t\t\t\t\tdownloadType: \"text/plain\"\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Otherwise put ourselves in the \"pending autosave\" state and wait for the change event before we do the autosave\n\t\t\t\tself.pendingAutoSave = true;\n\t\t\t}\n\t\t});\n\t\t// Set up our beforeunload handler\n\t\t$tw.addUnloadTask(function(event) {\n\t\t\tvar confirmationMessage;\n\t\t\tif(self.isDirty()) {\n\t\t\t\tconfirmationMessage = $tw.language.getString(\"UnsavedChangesWarning\");\n\t\t\t\tevent.returnValue = confirmationMessage; // Gecko\n\t\t\t}\n\t\t\treturn confirmationMessage;\n\t\t});\n\t}\n\t// Install the save action handlers\n\tif($tw.browser) {\n\t\t$tw.rootWidget.addEventListener(\"tm-save-wiki\",function(event) {\n\t\t\tself.saveWiki({\n\t\t\t\ttemplate: event.param,\n\t\t\t\tdownloadType: \"text/plain\",\n\t\t\t\tvariables: event.paramObject\n\t\t\t});\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-download-file\",function(event) {\n\t\t\tself.saveWiki({\n\t\t\t\tmethod: \"download\",\n\t\t\t\ttemplate: event.param,\n\t\t\t\tdownloadType: 
\"text/plain\",\n\t\t\t\tvariables: event.paramObject\n\t\t\t});\n\t\t});\n\t}\n}\n\nSaverHandler.prototype.titleSyncFilter = \"$:/config/SaverFilter\";\nSaverHandler.prototype.titleAutoSave = \"$:/config/AutoSave\";\nSaverHandler.prototype.titleSavedNotification = \"$:/language/Notifications/Save/Done\";\n\n/*\nSelect the appropriate saver modules and set them up\n*/\nSaverHandler.prototype.initSavers = function(moduleType) {\n\tmoduleType = moduleType || \"saver\";\n\t// Instantiate the available savers\n\tthis.savers = [];\n\tvar self = this;\n\t$tw.modules.forEachModuleOfType(moduleType,function(title,module) {\n\t\tif(module.canSave(self)) {\n\t\t\tself.savers.push(module.create(self.wiki));\n\t\t}\n\t});\n\t// Sort the savers into priority order\n\tthis.savers.sort(function(a,b) {\n\t\tif(a.info.priority < b.info.priority) {\n\t\t\treturn -1;\n\t\t} else {\n\t\t\tif(a.info.priority > b.info.priority) {\n\t\t\t\treturn +1;\n\t\t\t} else {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nSave the wiki contents. Options are:\n\tmethod: \"save\", \"autosave\" or \"download\"\n\ttemplate: the tiddler containing the template to save\n\tdownloadType: the content type for the saved file\n*/\nSaverHandler.prototype.saveWiki = function(options) {\n\toptions = options || {};\n\tvar self = this,\n\t\tmethod = options.method || \"save\",\n\t\tvariables = options.variables || {},\n\t\ttemplate = options.template || \"$:/core/save/all\",\n\t\tdownloadType = options.downloadType || \"text/plain\",\n\t\ttext = this.wiki.renderTiddler(downloadType,template,options),\n\t\tcallback = function(err) {\n\t\t\tif(err) {\n\t\t\t\talert($tw.language.getString(\"Error/WhileSaving\") + \":\\n\\n\" + err);\n\t\t\t} else {\n\t\t\t\t// Clear the task queue if we're saving (rather than downloading)\n\t\t\t\tif(method !== \"download\") {\n\t\t\t\t\tself.numChanges = 0;\n\t\t\t\t\tself.updateDirtyStatus();\n\t\t\t\t}\n\t\t\t\t$tw.notifier.display(self.titleSavedNotification);\n\t\t\t\tif(options.callback) {\n\t\t\t\t\toptions.callback();\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t// Ignore autosave if disabled\n\tif(method === \"autosave\" && this.wiki.getTiddlerText(this.titleAutoSave,\"yes\") !== \"yes\") {\n\t\treturn false;\n\t}\n\t// Call the highest priority saver that supports this method\n\tfor(var t=this.savers.length-1; t>=0; t--) {\n\t\tvar saver = this.savers[t];\n\t\tif(saver.info.capabilities.indexOf(method) !== -1 && saver.save(text,method,callback,{variables: {filename: variables.filename}})) {\n\t\t\tthis.logger.log(\"Saving wiki with method\",method,\"through saver\",saver.info.name);\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n};\n\n/*\nChecks whether the wiki is dirty (ie the window shouldn't be closed)\n*/\nSaverHandler.prototype.isDirty = function() {\n\treturn this.numChanges > 0;\n};\n\n/*\nUpdate the document body with the class \"tc-dirty\" if the wiki has unsaved/unsynced changes\n*/\nSaverHandler.prototype.updateDirtyStatus = function() {\n\tif($tw.browser) {\n\t\t$tw.utils.toggleClass(document.body,\"tc-dirty\",this.isDirty());\n\t}\n};\n\nexports.SaverHandler = SaverHandler;\n\n})();\n",
            "title": "$:/core/modules/saver-handler.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/savers/andtidwiki.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/andtidwiki.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the AndTidWiki Android app\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar AndTidWiki = function(wiki) {\n};\n\nAndTidWiki.prototype.save = function(text,method,callback) {\n\t// Get the pathname of this document\n\tvar pathname = decodeURIComponent(document.location.toString().split(\"#\")[0]);\n\t// Strip the file://\n\tif(pathname.indexOf(\"file://\") === 0) {\n\t\tpathname = pathname.substr(7);\n\t}\n\t// Strip any query or location part\n\tvar p = pathname.indexOf(\"?\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\tp = pathname.indexOf(\"#\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\t// Save the file\n\twindow.twi.saveFile(pathname,text);\n\t// Call the callback\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nAndTidWiki.prototype.info = {\n\tname: \"andtidwiki\",\n\tpriority: 1600,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn !!window.twi && !!window.twi.saveFile;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new AndTidWiki(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/andtidwiki.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/download.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/download.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via HTML5's download APIs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar DownloadSaver = function(wiki) {\n};\n\nDownloadSaver.prototype.save = function(text,method,callback,options) {\n\toptions = options || {};\n\t// Get the current filename\n\tvar filename = options.variables.filename;\n\tif(!filename) {\n\t\tvar p = document.location.pathname.lastIndexOf(\"/\");\n\t\tif(p !== -1) {\n\t\t\tfilename = document.location.pathname.substr(p+1);\n\t\t}\n\t}\n\tif(!filename) {\n\t\tfilename = \"tiddlywiki.html\";\n\t}\n\t// Set up the link\n\tvar link = document.createElement(\"a\");\n\tlink.setAttribute(\"target\",\"_blank\");\n\tlink.setAttribute(\"rel\",\"noopener noreferrer\");\n\tif(Blob !== undefined) {\n\t\tvar blob = new Blob([text], {type: \"text/html\"});\n\t\tlink.setAttribute(\"href\", URL.createObjectURL(blob));\n\t} else {\n\t\tlink.setAttribute(\"href\",\"data:text/html,\" + encodeURIComponent(text));\n\t}\n\tlink.setAttribute(\"download\",filename);\n\tdocument.body.appendChild(link);\n\tlink.click();\n\tdocument.body.removeChild(link);\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nDownloadSaver.prototype.info = {\n\tname: \"download\",\n\tpriority: 100,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn document.createElement(\"a\").download !== undefined;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new DownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/download.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/fsosaver.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/fsosaver.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via MS FileSystemObject ActiveXObject\n\nNote: Since TiddlyWiki's markup contains the MOTW, the FileSystemObject normally won't be available. \nHowever, if the wiki is loaded as an .HTA file (Windows HTML Applications) then the FSO can be used.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar FSOSaver = function(wiki) {\n};\n\nFSOSaver.prototype.save = function(text,method,callback) {\n\t// Get the pathname of this document\n\tvar pathname = unescape(document.location.pathname);\n\t// Test for a Windows path of the form /x:\\blah...\n\tif(/^\\/[A-Z]\\:\\\\[^\\\\]+/i.test(pathname)) {\t// ie: ^/[a-z]:/[^/]+\n\t\t// Remove the leading slash\n\t\tpathname = pathname.substr(1);\n\t} else if(document.location.hostname !== \"\" && /^\\/\\\\[^\\\\]+\\\\[^\\\\]+/i.test(pathname)) {\t// test for \\\\server\\share\\blah... - ^/[^/]+/[^/]+\n\t\t// Remove the leading slash\n\t\tpathname = pathname.substr(1);\n\t\t// reconstruct UNC path\n\t\tpathname = \"\\\\\\\\\" + document.location.hostname + pathname;\n\t} else {\n\t\treturn false;\n\t}\n\t// Save the file (as UTF-16)\n\tvar fso = new ActiveXObject(\"Scripting.FileSystemObject\");\n\tvar file = fso.OpenTextFile(pathname,2,-1,-1);\n\tfile.Write(text);\n\tfile.Close();\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nFSOSaver.prototype.info = {\n\tname: \"FSOSaver\",\n\tpriority: 120,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\ttry {\n\t\treturn (window.location.protocol === \"file:\") && !!(new ActiveXObject(\"Scripting.FileSystemObject\"));\n\t} catch(e) { return false; }\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new FSOSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/fsosaver.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/manualdownload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/manualdownload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via HTML5's download APIs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Title of the tiddler containing the download message\nvar downloadInstructionsTitle = \"$:/language/Modals/Download\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar ManualDownloadSaver = function(wiki) {\n};\n\nManualDownloadSaver.prototype.save = function(text,method,callback) {\n\t$tw.modal.display(downloadInstructionsTitle,{\n\t\tdownloadLink: \"data:text/html,\" + encodeURIComponent(text)\n\t});\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nManualDownloadSaver.prototype.info = {\n\tname: \"manualdownload\",\n\tpriority: 0,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new ManualDownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/manualdownload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/msdownload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/msdownload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via window.navigator.msSaveBlob()\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar MsDownloadSaver = function(wiki) {\n};\n\nMsDownloadSaver.prototype.save = function(text,method,callback) {\n\t// Get the current filename\n\tvar filename = \"tiddlywiki.html\",\n\t\tp = document.location.pathname.lastIndexOf(\"/\");\n\tif(p !== -1) {\n\t\tfilename = document.location.pathname.substr(p+1);\n\t}\n\t// Set up the link\n\tvar blob = new Blob([text], {type: \"text/html\"});\n\twindow.navigator.msSaveBlob(blob,filename);\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nMsDownloadSaver.prototype.info = {\n\tname: \"msdownload\",\n\tpriority: 110,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn !!window.navigator.msSaveBlob;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new MsDownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/msdownload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/put.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/put.js\ntype: application/javascript\nmodule-type: saver\n\nSaves wiki by performing a PUT request to the server\n\nWorks with any server which accepts a PUT request\nto the current URL, such as a WebDAV server.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar PutSaver = function(wiki) {\n\tthis.wiki = wiki;\n\tvar self = this;\n\t// Async server probe. Until probe finishes, save will fail fast\n\t// See also https://github.com/Jermolene/TiddlyWiki5/issues/2276\n\tvar req = new XMLHttpRequest();\n\treq.open(\"OPTIONS\",encodeURI(document.location.protocol + \"//\" + document.location.hostname + \":\" + document.location.port + document.location.pathname));\n\treq.onload = function() {\n\t\t// Check DAV header http://www.webdav.org/specs/rfc2518.html#rfc.section.9.1\n\t\tself.serverAcceptsPuts = (this.status === 200 && !!this.getResponseHeader('dav'));\n\t};\n\treq.send();\n};\n\nPutSaver.prototype.save = function(text,method,callback) {\n\tif (!this.serverAcceptsPuts) {\n\t\treturn false;\n\t}\n\tvar req = new XMLHttpRequest();\n\t// TODO: store/check ETags if supported by server, to protect against overwrites\n\t// Prompt: Do you want to save over this? Y/N\n\t// Merging would be ideal, and may be possible using future generic merge flow\n\treq.onload = function() {\n\t\tif (this.status === 200 || this.status === 201) {\n\t\t\tcallback(null); // success\n\t\t}\n\t\telse {\n\t\t\tcallback(this.responseText); // fail\n\t\t}\n\t};\n\treq.open(\"PUT\", encodeURI(window.location.href));\n\treq.setRequestHeader(\"Content-Type\", \"text/html;charset=UTF-8\");\n\treq.send(text);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nPutSaver.prototype.info = {\n\tname: \"put\",\n\tpriority: 2000,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn /^https?:/.test(location.protocol);\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new PutSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/put.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/tiddlyfox.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/tiddlyfox.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the TiddlyFox file extension\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar TiddlyFoxSaver = function(wiki) {\n};\n\nTiddlyFoxSaver.prototype.save = function(text,method,callback) {\n\tvar messageBox = document.getElementById(\"tiddlyfox-message-box\");\n\tif(messageBox) {\n\t\t// Get the pathname of this document\n\t\tvar pathname = document.location.toString().split(\"#\")[0];\n\t\t// Replace file://localhost/ with file:///\n\t\tif(pathname.indexOf(\"file://localhost/\") === 0) {\n\t\t\tpathname = \"file://\" + pathname.substr(16);\n\t\t}\n\t\t// Windows path file:///x:/blah/blah --> x:\\blah\\blah\n\t\tif(/^file\\:\\/\\/\\/[A-Z]\\:\\//i.test(pathname)) {\n\t\t\t// Remove the leading slash and convert slashes to backslashes\n\t\t\tpathname = pathname.substr(8).replace(/\\//g,\"\\\\\");\n\t\t// Firefox Windows network path file://///server/share/blah/blah --> //server/share/blah/blah\n\t\t} else if(pathname.indexOf(\"file://///\") === 0) {\n\t\t\tpathname = \"\\\\\\\\\" + unescape(pathname.substr(10)).replace(/\\//g,\"\\\\\");\n\t\t// Mac/Unix local path file:///path/path --> /path/path\n\t\t} else if(pathname.indexOf(\"file:///\") === 0) {\n\t\t\tpathname = unescape(pathname.substr(7));\n\t\t// Mac/Unix local path file:/path/path --> /path/path\n\t\t} else if(pathname.indexOf(\"file:/\") === 0) {\n\t\t\tpathname = unescape(pathname.substr(5));\n\t\t// Otherwise Windows networth path file://server/share/path/path --> \\\\server\\share\\path\\path\n\t\t} else {\n\t\t\tpathname = \"\\\\\\\\\" + unescape(pathname.substr(7)).replace(new RegExp(\"/\",\"g\"),\"\\\\\");\n\t\t}\n\t\t// Create the message element and put it in the message box\n\t\tvar message = document.createElement(\"div\");\n\t\tmessage.setAttribute(\"data-tiddlyfox-path\",decodeURIComponent(pathname));\n\t\tmessage.setAttribute(\"data-tiddlyfox-content\",text);\n\t\tmessageBox.appendChild(message);\n\t\t// Add an event handler for when the file has been saved\n\t\tmessage.addEventListener(\"tiddlyfox-have-saved-file\",function(event) {\n\t\t\tcallback(null);\n\t\t}, false);\n\t\t// Create and dispatch the custom event to the extension\n\t\tvar event = document.createEvent(\"Events\");\n\t\tevent.initEvent(\"tiddlyfox-save-file\",true,false);\n\t\tmessage.dispatchEvent(event);\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nInformation about this saver\n*/\nTiddlyFoxSaver.prototype.info = {\n\tname: \"tiddlyfox\",\n\tpriority: 1500,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn (window.location.protocol === \"file:\");\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TiddlyFoxSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/tiddlyfox.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/tiddlyie.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/tiddlyie.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via Internet Explorer BHO extenion (TiddlyIE)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar TiddlyIESaver = function(wiki) {\n};\n\nTiddlyIESaver.prototype.save = function(text,method,callback) {\n\t// Check existence of TiddlyIE BHO extension (note: only works after document is complete)\n\tif(typeof(window.TiddlyIE) != \"undefined\") {\n\t\t// Get the pathname of this document\n\t\tvar pathname = unescape(document.location.pathname);\n\t\t// Test for a Windows path of the form /x:/blah...\n\t\tif(/^\\/[A-Z]\\:\\/[^\\/]+/i.test(pathname)) {\t// ie: ^/[a-z]:/[^/]+ (is this better?: ^/[a-z]:/[^/]+(/[^/]+)*\\.[^/]+ )\n\t\t\t// Remove the leading slash\n\t\t\tpathname = pathname.substr(1);\n\t\t\t// Convert slashes to backslashes\n\t\t\tpathname = pathname.replace(/\\//g,\"\\\\\");\n\t\t} else if(document.hostname !== \"\" && /^\\/[^\\/]+\\/[^\\/]+/i.test(pathname)) {\t// test for \\\\server\\share\\blah... - ^/[^/]+/[^/]+\n\t\t\t// Convert slashes to backslashes\n\t\t\tpathname = pathname.replace(/\\//g,\"\\\\\");\n\t\t\t// reconstruct UNC path\n\t\t\tpathname = \"\\\\\\\\\" + document.location.hostname + pathname;\n\t\t} else return false;\n\t\t// Prompt the user to save the file\n\t\twindow.TiddlyIE.save(pathname, text);\n\t\t// Callback that we succeeded\n\t\tcallback(null);\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nInformation about this saver\n*/\nTiddlyIESaver.prototype.info = {\n\tname: \"tiddlyiesaver\",\n\tpriority: 1500,\n\tcapabilities: [\"save\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn (window.location.protocol === \"file:\");\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TiddlyIESaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/tiddlyie.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/twedit.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/twedit.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the TWEdit iOS app\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar TWEditSaver = function(wiki) {\n};\n\nTWEditSaver.prototype.save = function(text,method,callback) {\n\t// Bail if we're not running under TWEdit\n\tif(typeof DeviceInfo !== \"object\") {\n\t\treturn false;\n\t}\n\t// Get the pathname of this document\n\tvar pathname = decodeURIComponent(document.location.pathname);\n\t// Strip any query or location part\n\tvar p = pathname.indexOf(\"?\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\tp = pathname.indexOf(\"#\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\t// Remove the leading \"/Documents\" from path\n\tvar prefix = \"/Documents\";\n\tif(pathname.indexOf(prefix) === 0) {\n\t\tpathname = pathname.substr(prefix.length);\n\t}\n\t// Error handler\n\tvar errorHandler = function(event) {\n\t\t// Error\n\t\tcallback($tw.language.getString(\"Error/SavingToTWEdit\") + \": \" + event.target.error.code);\n\t};\n\t// Get the file system\n\twindow.requestFileSystem(LocalFileSystem.PERSISTENT,0,function(fileSystem) {\n\t\t// Now we've got the filesystem, get the fileEntry\n\t\tfileSystem.root.getFile(pathname, {create: true}, function(fileEntry) {\n\t\t\t// Now we've got the fileEntry, create the writer\n\t\t\tfileEntry.createWriter(function(writer) {\n\t\t\t\twriter.onerror = errorHandler;\n\t\t\t\twriter.onwrite = function() {\n\t\t\t\t\tcallback(null);\n\t\t\t\t};\n\t\t\t\twriter.position = 0;\n\t\t\t\twriter.write(text);\n\t\t\t},errorHandler);\n\t\t}, errorHandler);\n\t}, errorHandler);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nTWEditSaver.prototype.info = {\n\tname: \"twedit\",\n\tpriority: 1600,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TWEditSaver(wiki);\n};\n\n/////////////////////////// Hack\n// HACK: This ensures that TWEdit recognises us as a TiddlyWiki document\nif($tw.browser) {\n\twindow.version = {title: \"TiddlyWiki\"};\n}\n\n})();\n",
            "title": "$:/core/modules/savers/twedit.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/upload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/upload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via upload to a server.\n\nDesigned to be compatible with BidiX's UploadPlugin at http://tiddlywiki.bidix.info/#UploadPlugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar UploadSaver = function(wiki) {\n\tthis.wiki = wiki;\n};\n\nUploadSaver.prototype.save = function(text,method,callback) {\n\t// Get the various parameters we need\n\tvar backupDir = this.wiki.getTextReference(\"$:/UploadBackupDir\") || \".\",\n\t\tusername = this.wiki.getTextReference(\"$:/UploadName\"),\n\t\tpassword = $tw.utils.getPassword(\"upload\"),\n\t\tuploadDir = this.wiki.getTextReference(\"$:/UploadDir\") || \".\",\n\t\tuploadFilename = this.wiki.getTextReference(\"$:/UploadFilename\") || \"index.html\",\n\t\turl = this.wiki.getTextReference(\"$:/UploadURL\");\n\t// Bail out if we don't have the bits we need\n\tif(!username || username.toString().trim() === \"\" || !password || password.toString().trim() === \"\") {\n\t\treturn false;\n\t}\n\t// Construct the url if not provided\n\tif(!url) {\n\t\turl = \"http://\" + username + \".tiddlyspot.com/store.cgi\";\n\t}\n\t// Assemble the header\n\tvar boundary = \"---------------------------\" + \"AaB03x\";\t\n\tvar uploadFormName = \"UploadPlugin\";\n\tvar head = [];\n\thead.push(\"--\" + boundary + \"\\r\\nContent-disposition: form-data; name=\\\"UploadPlugin\\\"\\r\\n\");\n\thead.push(\"backupDir=\" + backupDir + \";user=\" + username + \";password=\" + password + \";uploaddir=\" + uploadDir + \";;\"); \n\thead.push(\"\\r\\n\" + \"--\" + boundary);\n\thead.push(\"Content-disposition: form-data; name=\\\"userfile\\\"; filename=\\\"\" + uploadFilename + \"\\\"\");\n\thead.push(\"Content-Type: text/html;charset=UTF-8\");\n\thead.push(\"Content-Length: \" + text.length + \"\\r\\n\");\n\thead.push(\"\");\n\t// Assemble the tail and the data itself\n\tvar tail = \"\\r\\n--\" + boundary + \"--\\r\\n\",\n\t\tdata = head.join(\"\\r\\n\") + text + tail;\n\t// Do the HTTP post\n\tvar http = new XMLHttpRequest();\n\thttp.open(\"POST\",url,true,username,password);\n\thttp.setRequestHeader(\"Content-Type\",\"multipart/form-data; charset=UTF-8; boundary=\" + boundary);\n\thttp.onreadystatechange = function() {\n\t\tif(http.readyState == 4 && http.status == 200) {\n\t\t\tif(http.responseText.substr(0,4) === \"0 - \") {\n\t\t\t\tcallback(null);\n\t\t\t} else {\n\t\t\t\tcallback(http.responseText);\n\t\t\t}\n\t\t}\n\t};\n\ttry {\n\t\thttp.send(data);\n\t} catch(ex) {\n\t\treturn callback($tw.language.getString(\"Error/Caption\") + \":\" + ex);\n\t}\n\t$tw.notifier.display(\"$:/language/Notifications/Save/Starting\");\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nUploadSaver.prototype.info = {\n\tname: \"upload\",\n\tpriority: 2000,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new UploadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/upload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/browser-messaging.js": {
            "text": "/*\\\ntitle: $:/core/modules/browser-messaging.js\ntype: application/javascript\nmodule-type: startup\n\nBrowser message handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"browser-messaging\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n/*\nLoad a specified url as an iframe and call the callback when it is loaded. If the url is already loaded then the existing iframe instance is used\n*/\nfunction loadIFrame(url,callback) {\n\t// Check if iframe already exists\n\tvar iframeInfo = $tw.browserMessaging.iframeInfoMap[url];\n\tif(iframeInfo) {\n\t\t// We've already got the iframe\n\t\tcallback(null,iframeInfo);\n\t} else {\n\t\t// Create the iframe and save it in the list\n\t\tvar iframe = document.createElement(\"iframe\"),\n\t\t\tiframeInfo = {\n\t\t\t\turl: url,\n\t\t\t\tstatus: \"loading\",\n\t\t\t\tdomNode: iframe\n\t\t\t};\n\t\t$tw.browserMessaging.iframeInfoMap[url] = iframeInfo;\n\t\tsaveIFrameInfoTiddler(iframeInfo);\n\t\t// Add the iframe to the DOM and hide it\n\t\tiframe.style.display = \"none\";\n\t\tdocument.body.appendChild(iframe);\n\t\t// Set up onload\n\t\tiframe.onload = function() {\n\t\t\tiframeInfo.status = \"loaded\";\n\t\t\tsaveIFrameInfoTiddler(iframeInfo);\n\t\t\tcallback(null,iframeInfo);\n\t\t};\n\t\tiframe.onerror = function() {\n\t\t\tcallback(\"Cannot load iframe\");\n\t\t};\n\t\ttry {\n\t\t\tiframe.src = url;\n\t\t} catch(ex) {\n\t\t\tcallback(ex);\n\t\t}\n\t}\n}\n\nfunction saveIFrameInfoTiddler(iframeInfo) {\n\t$tw.wiki.addTiddler(new $tw.Tiddler($tw.wiki.getCreationFields(),{\n\t\ttitle: \"$:/temp/ServerConnection/\" + iframeInfo.url,\n\t\ttext: iframeInfo.status,\n\t\ttags: [\"$:/tags/ServerConnection\"],\n\t\turl: iframeInfo.url\n\t},$tw.wiki.getModificationFields()));\n}\n\nexports.startup = function() {\n\t// Initialise the store of iframes we've created\n\t$tw.browserMessaging = {\n\t\tiframeInfoMap: {} // Hashmap by URL of {url:,status:\"loading/loaded\",domNode:}\n\t};\n\t// Listen for widget messages to control loading the plugin library\n\t$tw.rootWidget.addEventListener(\"tm-load-plugin-library\",function(event) {\n\t\tvar paramObject = event.paramObject || {},\n\t\t\turl = paramObject.url;\n\t\tif(url) {\n\t\t\tloadIFrame(url,function(err,iframeInfo) {\n\t\t\t\tif(err) {\n\t\t\t\t\talert($tw.language.getString(\"Error/LoadingPluginLibrary\") + \": \" + url);\n\t\t\t\t} else {\n\t\t\t\t\tiframeInfo.domNode.contentWindow.postMessage({\n\t\t\t\t\t\tverb: \"GET\",\n\t\t\t\t\t\turl: \"recipes/library/tiddlers.json\",\n\t\t\t\t\t\tcookies: {\n\t\t\t\t\t\t\ttype: \"save-info\",\n\t\t\t\t\t\t\tinfoTitlePrefix: paramObject.infoTitlePrefix || \"$:/temp/RemoteAssetInfo/\",\n\t\t\t\t\t\t\turl: url\n\t\t\t\t\t\t}\n\t\t\t\t\t},\"*\");\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t$tw.rootWidget.addEventListener(\"tm-load-plugin-from-library\",function(event) {\n\t\tvar paramObject = event.paramObject || {},\n\t\t\turl = paramObject.url,\n\t\t\ttitle = paramObject.title;\n\t\tif(url && title) {\n\t\t\tloadIFrame(url,function(err,iframeInfo) {\n\t\t\t\tif(err) {\n\t\t\t\t\talert($tw.language.getString(\"Error/LoadingPluginLibrary\") + \": \" + url);\n\t\t\t\t} else {\n\t\t\t\t\tiframeInfo.domNode.contentWindow.postMessage({\n\t\t\t\t\t\tverb: \"GET\",\n\t\t\t\t\t\turl: \"recipes/library/tiddlers/\" + encodeURIComponent(title) + \".json\",\n\t\t\t\t\t\tcookies: 
{\n\t\t\t\t\t\t\ttype: \"save-tiddler\",\n\t\t\t\t\t\t\turl: url\n\t\t\t\t\t\t}\n\t\t\t\t\t},\"*\");\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t// Listen for window messages from other windows\n\twindow.addEventListener(\"message\",function listener(event){\n\t\tconsole.log(\"browser-messaging: \",document.location.toString())\n\t\tconsole.log(\"browser-messaging: Received message from\",event.origin);\n\t\tconsole.log(\"browser-messaging: Message content\",event.data);\n\t\tswitch(event.data.verb) {\n\t\t\tcase \"GET-RESPONSE\":\n\t\t\t\tif(event.data.status.charAt(0) === \"2\") {\n\t\t\t\t\tif(event.data.cookies) {\n\t\t\t\t\t\tif(event.data.cookies.type === \"save-info\") {\n\t\t\t\t\t\t\tvar tiddlers = JSON.parse(event.data.body);\n\t\t\t\t\t\t\t$tw.utils.each(tiddlers,function(tiddler) {\n\t\t\t\t\t\t\t\t$tw.wiki.addTiddler(new $tw.Tiddler($tw.wiki.getCreationFields(),tiddler,{\n\t\t\t\t\t\t\t\t\ttitle: event.data.cookies.infoTitlePrefix + event.data.cookies.url + \"/\" + tiddler.title,\n\t\t\t\t\t\t\t\t\t\"original-title\": tiddler.title,\n\t\t\t\t\t\t\t\t\ttext: \"\",\n\t\t\t\t\t\t\t\t\ttype: \"text/vnd.tiddlywiki\",\n\t\t\t\t\t\t\t\t\t\"original-type\": tiddler.type,\n\t\t\t\t\t\t\t\t\t\"plugin-type\": undefined,\n\t\t\t\t\t\t\t\t\t\"original-plugin-type\": tiddler[\"plugin-type\"],\n\t\t\t\t\t\t\t\t\t\"module-type\": undefined,\n\t\t\t\t\t\t\t\t\t\"original-module-type\": tiddler[\"module-type\"],\n\t\t\t\t\t\t\t\t\ttags: [\"$:/tags/RemoteAssetInfo\"],\n\t\t\t\t\t\t\t\t\t\"original-tags\": $tw.utils.stringifyList(tiddler.tags || []),\n\t\t\t\t\t\t\t\t\t\"server-url\": event.data.cookies.url\n\t\t\t\t\t\t\t\t},$tw.wiki.getModificationFields()));\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t} else if(event.data.cookies.type === \"save-tiddler\") {\n\t\t\t\t\t\t\tvar tiddler = JSON.parse(event.data.body);\n\t\t\t\t\t\t\t$tw.wiki.addTiddler(new $tw.Tiddler(tiddler));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t},false);\n};\n\n})();\n",
            "title": "$:/core/modules/browser-messaging.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/commands.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/commands.js\ntype: application/javascript\nmodule-type: startup\n\nCommand processing\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"commands\";\nexports.platforms = [\"node\"];\nexports.after = [\"story\"];\nexports.synchronous = false;\n\nexports.startup = function(callback) {\n\t// On the server, start a commander with the command line arguments\n\tvar commander = new $tw.Commander(\n\t\t$tw.boot.argv,\n\t\tfunction(err) {\n\t\t\tif(err) {\n\t\t\t\treturn $tw.utils.error(\"Error: \" + err);\n\t\t\t}\n\t\t\tcallback();\n\t\t},\n\t\t$tw.wiki,\n\t\t{output: process.stdout, error: process.stderr}\n\t);\n\tcommander.execute();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/commands.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/favicon.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/favicon.js\ntype: application/javascript\nmodule-type: startup\n\nFavicon handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"favicon\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\t\t\n// Favicon tiddler\nvar FAVICON_TITLE = \"$:/favicon.ico\";\n\nexports.startup = function() {\n\t// Set up the favicon\n\tsetFavicon();\n\t// Reset the favicon when the tiddler changes\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,FAVICON_TITLE)) {\n\t\t\tsetFavicon();\n\t\t}\n\t});\n};\n\nfunction setFavicon() {\n\tvar tiddler = $tw.wiki.getTiddler(FAVICON_TITLE);\n\tif(tiddler) {\n\t\tvar faviconLink = document.getElementById(\"faviconLink\");\n\t\tfaviconLink.setAttribute(\"href\",\"data:\" + tiddler.fields.type + \";base64,\" + tiddler.fields.text);\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/startup/favicon.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/info.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/info.js\ntype: application/javascript\nmodule-type: startup\n\nInitialise $:/info tiddlers via $:/temp/info-plugin pseudo-plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"info\";\nexports.before = [\"startup\"];\nexports.after = [\"load-modules\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Collect up the info tiddlers\n\tvar infoTiddlerFields = {};\n\t// Give each info module a chance to fill in as many info tiddlers as they want\n\t$tw.modules.forEachModuleOfType(\"info\",function(title,moduleExports) {\n\t\tif(moduleExports && moduleExports.getInfoTiddlerFields) {\n\t\t\tvar tiddlerFieldsArray = moduleExports.getInfoTiddlerFields(infoTiddlerFields);\n\t\t\t$tw.utils.each(tiddlerFieldsArray,function(fields) {\n\t\t\t\tif(fields) {\n\t\t\t\t\tinfoTiddlerFields[fields.title] = fields;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t// Bake the info tiddlers into a plugin\n\tvar fields = {\n\t\ttitle: \"$:/temp/info-plugin\",\n\t\ttype: \"application/json\",\n\t\t\"plugin-type\": \"info\",\n\t\ttext: JSON.stringify({tiddlers: infoTiddlerFields},null,$tw.config.preferences.jsonSpaces)\n\t};\n\t$tw.wiki.addTiddler(new $tw.Tiddler(fields));\n\t$tw.wiki.readPluginInfo();\n\t$tw.wiki.registerPluginTiddlers(\"info\");\n\t$tw.wiki.unpackPluginTiddlers();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/info.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/load-modules.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/load-modules.js\ntype: application/javascript\nmodule-type: startup\n\nLoad core modules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"load-modules\";\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Load modules\n\t$tw.modules.applyMethods(\"utils\",$tw.utils);\n\tif($tw.node) {\n\t\t$tw.modules.applyMethods(\"utils-node\",$tw.utils);\n\t}\n\t$tw.modules.applyMethods(\"global\",$tw);\n\t$tw.modules.applyMethods(\"config\",$tw.config);\n\t$tw.Tiddler.fieldModules = $tw.modules.getModulesByTypeAsHashmap(\"tiddlerfield\");\n\t$tw.modules.applyMethods(\"tiddlermethod\",$tw.Tiddler.prototype);\n\t$tw.modules.applyMethods(\"wikimethod\",$tw.Wiki.prototype);\n\t$tw.modules.applyMethods(\"tiddlerdeserializer\",$tw.Wiki.tiddlerDeserializerModules);\n\t$tw.macros = $tw.modules.getModulesByTypeAsHashmap(\"macro\");\n\t$tw.wiki.initParsers();\n\t$tw.Commander.initCommands();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/load-modules.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/password.js\ntype: application/javascript\nmodule-type: startup\n\nPassword handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"password\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t$tw.rootWidget.addEventListener(\"tm-set-password\",function(event) {\n\t\t$tw.passwordPrompt.createPrompt({\n\t\t\tserviceName: $tw.language.getString(\"Encryption/PromptSetPassword\"),\n\t\t\tnoUserName: true,\n\t\t\tsubmitText: $tw.language.getString(\"Encryption/SetPassword\"),\n\t\t\tcanCancel: true,\n\t\t\trepeatPassword: true,\n\t\t\tcallback: function(data) {\n\t\t\t\tif(data) {\n\t\t\t\t\t$tw.crypto.setPassword(data.password);\n\t\t\t\t}\n\t\t\t\treturn true; // Get rid of the password prompt\n\t\t\t}\n\t\t});\n\t});\n\t$tw.rootWidget.addEventListener(\"tm-clear-password\",function(event) {\n\t\tif($tw.browser) {\n\t\t\tif(!confirm($tw.language.getString(\"Encryption/ConfirmClearPassword\"))) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t$tw.crypto.setPassword(null);\n\t});\n\t// Ensure that $:/isEncrypted is maintained properly\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,\"$:/isEncrypted\")) {\n\t\t\t$tw.crypto.updateCryptoStateTiddler();\n\t\t}\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/startup/password.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/render.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/render.js\ntype: application/javascript\nmodule-type: startup\n\nTitle, stylesheet and page rendering\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"render\";\nexports.platforms = [\"browser\"];\nexports.after = [\"story\"];\nexports.synchronous = true;\n\n// Default story and history lists\nvar PAGE_TITLE_TITLE = \"$:/core/wiki/title\";\nvar PAGE_STYLESHEET_TITLE = \"$:/core/ui/PageStylesheet\";\nvar PAGE_TEMPLATE_TITLE = \"$:/core/ui/PageTemplate\";\n\n// Time (in ms) that we defer refreshing changes to draft tiddlers\nvar DRAFT_TIDDLER_TIMEOUT_TITLE = \"$:/config/Drafts/TypingTimeout\";\nvar DRAFT_TIDDLER_TIMEOUT = 400;\n\nexports.startup = function() {\n\t// Set up the title\n\t$tw.titleWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_TITLE_TITLE,{document: $tw.fakeDocument, parseAsInline: true});\n\t$tw.titleContainer = $tw.fakeDocument.createElement(\"div\");\n\t$tw.titleWidgetNode.render($tw.titleContainer,null);\n\tdocument.title = $tw.titleContainer.textContent;\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.titleWidgetNode.refresh(changes,$tw.titleContainer,null)) {\n\t\t\tdocument.title = $tw.titleContainer.textContent;\n\t\t}\n\t});\n\t// Set up the styles\n\t$tw.styleWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_STYLESHEET_TITLE,{document: $tw.fakeDocument});\n\t$tw.styleContainer = $tw.fakeDocument.createElement(\"style\");\n\t$tw.styleWidgetNode.render($tw.styleContainer,null);\n\t$tw.styleElement = document.createElement(\"style\");\n\t$tw.styleElement.innerHTML = $tw.styleContainer.textContent;\n\tdocument.head.insertBefore($tw.styleElement,document.head.firstChild);\n\t$tw.wiki.addEventListener(\"change\",$tw.perf.report(\"styleRefresh\",function(changes) {\n\t\tif($tw.styleWidgetNode.refresh(changes,$tw.styleContainer,null)) {\n\t\t\t$tw.styleElement.innerHTML = $tw.styleContainer.textContent;\n\t\t}\n\t}));\n\t// Display the $:/core/ui/PageTemplate tiddler to kick off the display\n\t$tw.perf.report(\"mainRender\",function() {\n\t\t$tw.pageWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_TEMPLATE_TITLE,{document: document, parentWidget: $tw.rootWidget});\n\t\t$tw.pageContainer = document.createElement(\"div\");\n\t\t$tw.utils.addClass($tw.pageContainer,\"tc-page-container-wrapper\");\n\t\tdocument.body.insertBefore($tw.pageContainer,document.body.firstChild);\n\t\t$tw.pageWidgetNode.render($tw.pageContainer,null);\n\t})();\n\t// Prepare refresh mechanism\n\tvar deferredChanges = Object.create(null),\n\t\ttimerId;\n\tfunction refresh() {\n\t\t// Process the refresh\n\t\t$tw.pageWidgetNode.refresh(deferredChanges);\n\t\tdeferredChanges = Object.create(null);\n\t}\n\t// Add the change event handler\n\t$tw.wiki.addEventListener(\"change\",$tw.perf.report(\"mainRefresh\",function(changes) {\n\t\t// Check if only drafts have changed\n\t\tvar onlyDraftsHaveChanged = true;\n\t\tfor(var title in changes) {\n\t\t\tvar tiddler = $tw.wiki.getTiddler(title);\n\t\t\tif(!tiddler || !tiddler.hasField(\"draft.of\")) {\n\t\t\t\tonlyDraftsHaveChanged = false;\n\t\t\t}\n\t\t}\n\t\t// Defer the change if only drafts have changed\n\t\tif(timerId) {\n\t\t\tclearTimeout(timerId);\n\t\t}\n\t\ttimerId = null;\n\t\tif(onlyDraftsHaveChanged) {\n\t\t\tvar timeout = parseInt($tw.wiki.getTiddlerText(DRAFT_TIDDLER_TIMEOUT_TITLE,\"\"),10);\n\t\t\tif(isNaN(timeout)) {\n\t\t\t\ttimeout = 
DRAFT_TIDDLER_TIMEOUT;\n\t\t\t}\n\t\t\ttimerId = setTimeout(refresh,timeout);\n\t\t\t$tw.utils.extend(deferredChanges,changes);\n\t\t} else {\n\t\t\t$tw.utils.extend(deferredChanges,changes);\n\t\t\trefresh();\n\t\t}\n\t}));\n\t// Fix up the link between the root widget and the page container\n\t$tw.rootWidget.domNodes = [$tw.pageContainer];\n\t$tw.rootWidget.children = [$tw.pageWidgetNode];\n};\n\n})();\n",
            "title": "$:/core/modules/startup/render.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/rootwidget.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/rootwidget.js\ntype: application/javascript\nmodule-type: startup\n\nSetup the root widget and the core root widget handlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"rootwidget\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.before = [\"story\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Install the modal message mechanism\n\t$tw.modal = new $tw.utils.Modal($tw.wiki);\n\t$tw.rootWidget.addEventListener(\"tm-modal\",function(event) {\n\t\t$tw.modal.display(event.param,{variables: event.paramObject});\n\t});\n\t// Install the notification  mechanism\n\t$tw.notifier = new $tw.utils.Notifier($tw.wiki);\n\t$tw.rootWidget.addEventListener(\"tm-notify\",function(event) {\n\t\t$tw.notifier.display(event.param,{variables: event.paramObject});\n\t});\n\t// Install the scroller\n\t$tw.pageScroller = new $tw.utils.PageScroller();\n\t$tw.rootWidget.addEventListener(\"tm-scroll\",function(event) {\n\t\t$tw.pageScroller.handleEvent(event);\n\t});\n\tvar fullscreen = $tw.utils.getFullScreenApis();\n\tif(fullscreen) {\n\t\t$tw.rootWidget.addEventListener(\"tm-full-screen\",function(event) {\n\t\t\tif(document[fullscreen._fullscreenElement]) {\n\t\t\t\tdocument[fullscreen._exitFullscreen]();\n\t\t\t} else {\n\t\t\t\tdocument.documentElement[fullscreen._requestFullscreen](Element.ALLOW_KEYBOARD_INPUT);\n\t\t\t}\n\t\t});\n\t}\n\t// If we're being viewed on a data: URI then give instructions for how to save\n\tif(document.location.protocol === \"data:\") {\n\t\t$tw.rootWidget.dispatchEvent({\n\t\t\ttype: \"tm-modal\",\n\t\t\tparam: \"$:/language/Modals/SaveInstructions\"\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/startup/rootwidget.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup.js\ntype: application/javascript\nmodule-type: startup\n\nMiscellaneous startup logic for both the client and server.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"startup\";\nexports.after = [\"load-modules\"];\nexports.synchronous = true;\n\n// Set to `true` to enable performance instrumentation\nvar PERFORMANCE_INSTRUMENTATION_CONFIG_TITLE = \"$:/config/Performance/Instrumentation\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.startup = function() {\n\tvar modules,n,m,f;\n\t// Minimal browser detection\n\tif($tw.browser) {\n\t\t$tw.browser.isIE = (/msie|trident/i.test(navigator.userAgent));\n\t\t$tw.browser.isFirefox = !!document.mozFullScreenEnabled;\n\t}\n\t// Platform detection\n\t$tw.platform = {};\n\tif($tw.browser) {\n\t\t$tw.platform.isMac = /Mac/.test(navigator.platform);\n\t\t$tw.platform.isWindows = /win/i.test(navigator.platform);\n\t\t$tw.platform.isLinux = /Linux/i.test(navigator.appVersion);\n\t} else {\n\t\tswitch(require(\"os\").platform()) {\n\t\t\tcase \"darwin\":\n\t\t\t\t$tw.platform.isMac = true;\n\t\t\t\tbreak;\n\t\t\tcase \"win32\":\n\t\t\t\t$tw.platform.isWindows = true;\n\t\t\t\tbreak;\n\t\t\tcase \"freebsd\":\n\t\t\t\t$tw.platform.isLinux = true;\n\t\t\t\tbreak;\n\t\t\tcase \"linux\":\n\t\t\t\t$tw.platform.isLinux = true;\n\t\t\t\tbreak;\n\t\t}\n\t}\n\t// Initialise version\n\t$tw.version = $tw.utils.extractVersionInfo();\n\t// Set up the performance framework\n\t$tw.perf = new $tw.Performance($tw.wiki.getTiddlerText(PERFORMANCE_INSTRUMENTATION_CONFIG_TITLE,\"no\") === \"yes\");\n\t// Kick off the language manager and switcher\n\t$tw.language = new $tw.Language();\n\t$tw.languageSwitcher = new $tw.PluginSwitcher({\n\t\twiki: $tw.wiki,\n\t\tpluginType: \"language\",\n\t\tcontrollerTitle: \"$:/language\",\n\t\tdefaultPlugins: [\n\t\t\t\"$:/languages/en-US\"\n\t\t]\n\t});\n\t// Kick off the theme manager\n\t$tw.themeManager = new $tw.PluginSwitcher({\n\t\twiki: $tw.wiki,\n\t\tpluginType: \"theme\",\n\t\tcontrollerTitle: \"$:/theme\",\n\t\tdefaultPlugins: [\n\t\t\t\"$:/themes/tiddlywiki/snowwhite\",\n\t\t\t\"$:/themes/tiddlywiki/vanilla\"\n\t\t]\n\t});\n\t// Kick off the keyboard manager\n\t$tw.keyboardManager = new $tw.KeyboardManager();\n\t// Clear outstanding tiddler store change events to avoid an unnecessary refresh cycle at startup\n\t$tw.wiki.clearTiddlerEventQueue();\n\t// Create a root widget for attaching event handlers. 
By using it as the parentWidget for another widget tree, one can reuse the event handlers\n\tif($tw.browser) {\n\t\t$tw.rootWidget = new widget.widget({\n\t\t\ttype: \"widget\",\n\t\t\tchildren: []\n\t\t},{\n\t\t\twiki: $tw.wiki,\n\t\t\tdocument: document\n\t\t});\n\t}\n\t// Find a working syncadaptor\n\t$tw.syncadaptor = undefined;\n\t$tw.modules.forEachModuleOfType(\"syncadaptor\",function(title,module) {\n\t\tif(!$tw.syncadaptor && module.adaptorClass) {\n\t\t\t$tw.syncadaptor = new module.adaptorClass({wiki: $tw.wiki});\n\t\t}\n\t});\n\t// Set up the syncer object if we've got a syncadaptor\n\tif($tw.syncadaptor) {\n\t\t$tw.syncer = new $tw.Syncer({wiki: $tw.wiki, syncadaptor: $tw.syncadaptor});\n\t} \n\t// Setup the saver handler\n\t$tw.saverHandler = new $tw.SaverHandler({wiki: $tw.wiki, dirtyTracking: !$tw.syncadaptor});\n\t// Host-specific startup\n\tif($tw.browser) {\n\t\t// Install the popup manager\n\t\t$tw.popup = new $tw.utils.Popup();\n\t\t// Install the animator\n\t\t$tw.anim = new $tw.utils.Animator();\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/startup.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/story.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/story.js\ntype: application/javascript\nmodule-type: startup\n\nLoad core modules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"story\";\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n// Default story and history lists\nvar DEFAULT_STORY_TITLE = \"$:/StoryList\";\nvar DEFAULT_HISTORY_TITLE = \"$:/HistoryList\";\n\n// Default tiddlers\nvar DEFAULT_TIDDLERS_TITLE = \"$:/DefaultTiddlers\";\n\n// Config\nvar CONFIG_UPDATE_ADDRESS_BAR = \"$:/config/Navigation/UpdateAddressBar\"; // Can be \"no\", \"permalink\", \"permaview\"\nvar CONFIG_UPDATE_HISTORY = \"$:/config/Navigation/UpdateHistory\"; // Can be \"yes\" or \"no\"\n\nexports.startup = function() {\n\t// Open startup tiddlers\n\topenStartupTiddlers();\n\tif($tw.browser) {\n\t\t// Set up location hash update\n\t\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\t\tif($tw.utils.hop(changes,DEFAULT_STORY_TITLE) || $tw.utils.hop(changes,DEFAULT_HISTORY_TITLE)) {\n\t\t\t\tupdateLocationHash({\n\t\t\t\t\tupdateAddressBar: $tw.wiki.getTiddlerText(CONFIG_UPDATE_ADDRESS_BAR,\"permaview\").trim(),\n\t\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim()\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\t\t// Listen for changes to the browser location hash\n\t\twindow.addEventListener(\"hashchange\",function() {\n\t\t\tvar hash = $tw.utils.getLocationHash();\n\t\t\tif(hash !== $tw.locationHash) {\n\t\t\t\t$tw.locationHash = hash;\n\t\t\t\topenStartupTiddlers({defaultToCurrentStory: true});\n\t\t\t}\n\t\t},false);\n\t\t// Listen for the tm-browser-refresh message\n\t\t$tw.rootWidget.addEventListener(\"tm-browser-refresh\",function(event) {\n\t\t\twindow.location.reload(true);\n\t\t});\n\t\t// Listen for the tm-home message\n\t\t$tw.rootWidget.addEventListener(\"tm-home\",function(event) {\n\t\t\twindow.location.hash = \"\";\n\t\t\tvar storyFilter = $tw.wiki.getTiddlerText(DEFAULT_TIDDLERS_TITLE),\n\t\t\t\tstoryList = $tw.wiki.filterTiddlers(storyFilter);\n\t\t\t//invoke any hooks that might change the default story list\n\t\t\tstoryList = $tw.hooks.invokeHook(\"th-opening-default-tiddlers-list\",storyList);\n\t\t\t$tw.wiki.addTiddler({title: DEFAULT_STORY_TITLE, text: \"\", list: storyList},$tw.wiki.getModificationFields());\n\t\t\tif(storyList[0]) {\n\t\t\t\t$tw.wiki.addToHistory(storyList[0]);\t\t\t\t\n\t\t\t}\n\t\t});\n\t\t// Listen for the tm-permalink message\n\t\t$tw.rootWidget.addEventListener(\"tm-permalink\",function(event) {\n\t\t\tupdateLocationHash({\n\t\t\t\tupdateAddressBar: \"permalink\",\n\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim(),\n\t\t\t\ttargetTiddler: event.param || event.tiddlerTitle\n\t\t\t});\n\t\t});\n\t\t// Listen for the tm-permaview message\n\t\t$tw.rootWidget.addEventListener(\"tm-permaview\",function(event) {\n\t\t\tupdateLocationHash({\n\t\t\t\tupdateAddressBar: \"permaview\",\n\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim(),\n\t\t\t\ttargetTiddler: event.param || event.tiddlerTitle\n\t\t\t});\n\t\t});\n\t}\n};\n\n/*\nProcess the location hash to open the specified tiddlers. Options:\ndefaultToCurrentStory: If true, the current story is retained as the default, instead of opening the default tiddlers\n*/\nfunction openStartupTiddlers(options) {\n\toptions = options || {};\n\t// Work out the target tiddler and the story filter. 
\"null\" means \"unspecified\"\n\tvar target = null,\n\t\tstoryFilter = null;\n\tif($tw.locationHash.length > 1) {\n\t\tvar hash = $tw.locationHash.substr(1),\n\t\t\tsplit = hash.indexOf(\":\");\n\t\tif(split === -1) {\n\t\t\ttarget = decodeURIComponent(hash.trim());\n\t\t} else {\n\t\t\ttarget = decodeURIComponent(hash.substr(0,split).trim());\n\t\t\tstoryFilter = decodeURIComponent(hash.substr(split + 1).trim());\n\t\t}\n\t}\n\t// If the story wasn't specified use the current tiddlers or a blank story\n\tif(storyFilter === null) {\n\t\tif(options.defaultToCurrentStory) {\n\t\t\tvar currStoryList = $tw.wiki.getTiddlerList(DEFAULT_STORY_TITLE);\n\t\t\tstoryFilter = $tw.utils.stringifyList(currStoryList);\n\t\t} else {\n\t\t\tif(target && target !== \"\") {\n\t\t\t\tstoryFilter = \"\";\n\t\t\t} else {\n\t\t\t\tstoryFilter = $tw.wiki.getTiddlerText(DEFAULT_TIDDLERS_TITLE);\n\t\t\t}\n\t\t}\n\t}\n\t// Process the story filter to get the story list\n\tvar storyList = $tw.wiki.filterTiddlers(storyFilter);\n\t// Invoke any hooks that want to change the default story list\n\tstoryList = $tw.hooks.invokeHook(\"th-opening-default-tiddlers-list\",storyList);\n\t// If the target tiddler isn't included then splice it in at the top\n\tif(target && storyList.indexOf(target) === -1) {\n\t\tstoryList.unshift(target);\n\t}\n\t// Save the story list\n\t$tw.wiki.addTiddler({title: DEFAULT_STORY_TITLE, text: \"\", list: storyList},$tw.wiki.getModificationFields());\n\t// If a target tiddler was specified add it to the history stack\n\tif(target && target !== \"\") {\n\t\t// The target tiddler doesn't need double square brackets, but we'll silently remove them if they're present\n\t\tif(target.indexOf(\"[[\") === 0 && target.substr(-2) === \"]]\") {\n\t\t\ttarget = target.substr(2,target.length - 4);\n\t\t}\n\t\t$tw.wiki.addToHistory(target);\n\t} else if(storyList.length > 0) {\n\t\t$tw.wiki.addToHistory(storyList[0]);\n\t}\n}\n\n/*\noptions: See below\noptions.updateAddressBar: \"permalink\", \"permaview\" or \"no\" (defaults to \"permaview\")\noptions.updateHistory: \"yes\" or \"no\" (defaults to \"no\")\noptions.targetTiddler: optional title of target tiddler for permalink\n*/\nfunction updateLocationHash(options) {\n\tif(options.updateAddressBar !== \"no\") {\n\t\t// Get the story and the history stack\n\t\tvar storyList = $tw.wiki.getTiddlerList(DEFAULT_STORY_TITLE),\n\t\t\thistoryList = $tw.wiki.getTiddlerData(DEFAULT_HISTORY_TITLE,[]),\n\t\t\ttargetTiddler = \"\";\n\t\tif(options.targetTiddler) {\n\t\t\ttargetTiddler = options.targetTiddler;\n\t\t} else {\n\t\t\t// The target tiddler is the one at the top of the stack\n\t\t\tif(historyList.length > 0) {\n\t\t\t\ttargetTiddler = historyList[historyList.length-1].title;\n\t\t\t}\n\t\t\t// Blank the target tiddler if it isn't present in the story\n\t\t\tif(storyList.indexOf(targetTiddler) === -1) {\n\t\t\t\ttargetTiddler = \"\";\n\t\t\t}\n\t\t}\n\t\t// Assemble the location hash\n\t\tif(options.updateAddressBar === \"permalink\") {\n\t\t\t$tw.locationHash = \"#\" + encodeURIComponent(targetTiddler);\n\t\t} else {\n\t\t\t$tw.locationHash = \"#\" + encodeURIComponent(targetTiddler) + \":\" + encodeURIComponent($tw.utils.stringifyList(storyList));\n\t\t}\n\t\t// Only change the location hash if we must, thus avoiding unnecessary onhashchange events\n\t\tif($tw.utils.getLocationHash() !== $tw.locationHash) {\n\t\t\tif(options.updateHistory === \"yes\") {\n\t\t\t\t// Assign the location hash so that history is updated\n\t\t\t\twindow.location.hash = 
$tw.locationHash;\n\t\t\t} else {\n\t\t\t\t// We use replace so that browser history isn't affected\n\t\t\t\twindow.location.replace(window.location.toString().split(\"#\")[0] + $tw.locationHash);\n\t\t\t}\n\t\t}\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/startup/story.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/windows.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/windows.js\ntype: application/javascript\nmodule-type: startup\n\nSetup root widget handlers for the messages concerned with opening external browser windows\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"windows\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n// Global to keep track of open windows (hashmap by title)\nvar windows = {};\n\nexports.startup = function() {\n\t// Handle open window message\n\t$tw.rootWidget.addEventListener(\"tm-open-window\",function(event) {\n\t\t// Get the parameters\n\t\tvar refreshHandler,\n\t\t\ttitle = event.param || event.tiddlerTitle,\n\t\t\tparamObject = event.paramObject || {},\n\t\t\ttemplate = paramObject.template || \"$:/core/templates/single.tiddler.window\",\n\t\t\twidth = paramObject.width || \"700\",\n\t\t\theight = paramObject.height || \"600\",\n\t\t\tvariables = $tw.utils.extend({},paramObject,{currentTiddler: title});\n\t\t// Open the window\n\t\tvar srcWindow = window.open(\"\",\"external-\" + title,\"scrollbars,width=\" + width + \",height=\" + height),\n\t\t\tsrcDocument = srcWindow.document;\n\t\twindows[title] = srcWindow;\n\t\t// Check for reopening the same window\n\t\tif(srcWindow.haveInitialisedWindow) {\n\t\t\treturn;\n\t\t}\n\t\t// Initialise the document\n\t\tsrcDocument.write(\"<html><head></head><body class='tc-body tc-single-tiddler-window'></body></html>\");\n\t\tsrcDocument.close();\n\t\tsrcDocument.title = title;\n\t\tsrcWindow.addEventListener(\"beforeunload\",function(event) {\n\t\t\tdelete windows[title];\n\t\t\t$tw.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t},false);\n\t\t// Set up the styles\n\t\tvar styleWidgetNode = $tw.wiki.makeTranscludeWidget(\"$:/core/ui/PageStylesheet\",{document: $tw.fakeDocument, variables: variables}),\n\t\t\tstyleContainer = $tw.fakeDocument.createElement(\"style\");\n\t\tstyleWidgetNode.render(styleContainer,null);\n\t\tvar styleElement = srcDocument.createElement(\"style\");\n\t\tstyleElement.innerHTML = styleContainer.textContent;\n\t\tsrcDocument.head.insertBefore(styleElement,srcDocument.head.firstChild);\n\t\t// Render the text of the tiddler\n\t\tvar parser = $tw.wiki.parseTiddler(template),\n\t\t\twidgetNode = $tw.wiki.makeWidget(parser,{document: srcDocument, parentWidget: $tw.rootWidget, variables: variables});\n\t\twidgetNode.render(srcDocument.body,srcDocument.body.firstChild);\n\t\t// Function to handle refreshes\n\t\trefreshHandler = function(changes) {\n\t\t\tif(styleWidgetNode.refresh(changes,styleContainer,null)) {\n\t\t\t\tstyleElement.innerHTML = styleContainer.textContent;\n\t\t\t}\n\t\t\twidgetNode.refresh(changes);\n\t\t};\n\t\t$tw.wiki.addEventListener(\"change\",refreshHandler);\n\t\tsrcWindow.haveInitialisedWindow = true;\n\t});\n\t// Close open windows when unloading main window\n\t$tw.addUnloadTask(function() {\n\t\t$tw.utils.each(windows,function(win) {\n\t\t\twin.close();\n\t\t});\n\t});\n\n};\n\n})();\n",
            "title": "$:/core/modules/startup/windows.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/story.js": {
            "text": "/*\\\ntitle: $:/core/modules/story.js\ntype: application/javascript\nmodule-type: global\n\nLightweight object for managing interactions with the story and history lists.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nConstruct Story object with options:\nwiki: reference to wiki object to use to resolve tiddler titles\nstoryTitle: title of story list tiddler\nhistoryTitle: title of history list tiddler\n*/\nfunction Story(options) {\n\toptions = options || {};\n\tthis.wiki = options.wiki || $tw.wiki;\n\tthis.storyTitle = options.storyTitle || \"$:/StoryList\";\n\tthis.historyTitle = options.historyTitle || \"$:/HistoryList\";\n};\n\nStory.prototype.navigateTiddler = function(navigateTo,navigateFromTitle,navigateFromClientRect) {\n\tthis.addToStory(navigateTo,navigateFromTitle);\n\tthis.addToHistory(navigateTo,navigateFromClientRect);\n};\n\nStory.prototype.getStoryList = function() {\n\treturn this.wiki.getTiddlerList(this.storyTitle) || [];\n};\n\nStory.prototype.addToStory = function(navigateTo,navigateFromTitle,options) {\n\toptions = options || {};\n\tvar storyList = this.getStoryList();\n\t// See if the tiddler is already there\n\tvar slot = storyList.indexOf(navigateTo);\n\t// Quit if it already exists in the story river\n\tif(slot >= 0) {\n\t\treturn;\n\t}\n\t// First we try to find the position of the story element we navigated from\n\tvar fromIndex = storyList.indexOf(navigateFromTitle);\n\tif(fromIndex >= 0) {\n\t\t// The tiddler is added from inside the river\n\t\t// Determine where to insert the tiddler; Fallback is \"below\"\n\t\tswitch(options.openLinkFromInsideRiver) {\n\t\t\tcase \"top\":\n\t\t\t\tslot = 0;\n\t\t\t\tbreak;\n\t\t\tcase \"bottom\":\n\t\t\t\tslot = storyList.length;\n\t\t\t\tbreak;\n\t\t\tcase \"above\":\n\t\t\t\tslot = fromIndex;\n\t\t\t\tbreak;\n\t\t\tcase \"below\": // Intentional fall-through\n\t\t\tdefault:\n\t\t\t\tslot = fromIndex + 1;\n\t\t\t\tbreak;\n\t\t}\n\t} else {\n\t\t// The tiddler is opened from outside the river. Determine where to insert the tiddler; default is \"top\"\n\t\tif(options.openLinkFromOutsideRiver === \"bottom\") {\n\t\t\t// Insert at bottom\n\t\t\tslot = storyList.length;\n\t\t} else {\n\t\t\t// Insert at top\n\t\t\tslot = 0;\n\t\t}\n\t}\n\t// Add the tiddler\n\tstoryList.splice(slot,0,navigateTo);\n\t// Save the story\n\tthis.saveStoryList(storyList);\n};\n\nStory.prototype.saveStoryList = function(storyList) {\n\tvar storyTiddler = this.wiki.getTiddler(this.storyTitle);\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\tthis.wiki.getCreationFields(),\n\t\t{title: this.storyTitle},\n\t\tstoryTiddler,\n\t\t{list: storyList},\n\t\tthis.wiki.getModificationFields()\n\t));\n};\n\nStory.prototype.addToHistory = function(navigateTo,navigateFromClientRect) {\n\tvar titles = $tw.utils.isArray(navigateTo) ? 
navigateTo : [navigateTo];\n\t// Add a new record to the top of the history stack\n\tvar historyList = this.wiki.getTiddlerData(this.historyTitle,[]);\n\t$tw.utils.each(titles,function(title) {\n\t\thistoryList.push({title: title, fromPageRect: navigateFromClientRect});\n\t});\n\tthis.wiki.setTiddlerData(this.historyTitle,historyList,{\"current-tiddler\": titles[titles.length-1]});\n};\n\nStory.prototype.storyCloseTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyCloseAllTiddlers = function() {\n// TBD\n};\n\nStory.prototype.storyCloseOtherTiddlers = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyEditTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyDeleteTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storySaveTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyCancelTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyNewTiddler = function(targetTitle) {\n// TBD\n};\n\nexports.Story = Story;\n\n\n})();\n",
            "title": "$:/core/modules/story.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/storyviews/classic.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/classic.js\ntype: application/javascript\nmodule-type: storyview\n\nViews the story as a linear sequence\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar easing = \"cubic-bezier(0.645, 0.045, 0.355, 1)\"; // From http://easings.net/#easeInOutCubic\n\nvar ClassicStoryView = function(listWidget) {\n\tthis.listWidget = listWidget;\n};\n\nClassicStoryView.prototype.navigateTo = function(historyInfo) {\n\tvar listElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Scroll the node into view\n\tthis.listWidget.dispatchEvent({type: \"tm-scroll\", target: targetElement});\n};\n\nClassicStoryView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Get the current height of the tiddler\n\tvar computedStyle = window.getComputedStyle(targetElement),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrHeight = targetElement.offsetHeight + currMarginTop;\n\t// Reset the margin once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(targetElement,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"}\n\t\t]);\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{marginBottom: (-currHeight) + \"px\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t// Transition to the final position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"opacity \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms \" + easing},\n\t\t{marginBottom: currMarginBottom + \"px\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n};\n\nClassicStoryView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\twidget.removeChildDomNodes();\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Get the current height of the tiddler\n\tvar currWidth = targetElement.offsetWidth,\n\t\tcomputedStyle = window.getComputedStyle(targetElement),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrHeight = targetElement.offsetHeight + currMarginTop;\n\t// Remove the dom nodes of the widget at the end of the transition\n\tsetTimeout(removeElement,duration);\n\t// Animate the closure\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"translateX(0px)\"},\n\t\t{marginBottom:  currMarginBottom + \"px\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: 
$tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms \" + easing},\n\t\t{transform: \"translateX(-\" + currWidth + \"px)\"},\n\t\t{marginBottom: (-currHeight) + \"px\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n};\n\nexports.classic = ClassicStoryView;\n\n})();",
            "title": "$:/core/modules/storyviews/classic.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/storyviews/pop.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/pop.js\ntype: application/javascript\nmodule-type: storyview\n\nAnimates list insertions and removals\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar PopStoryView = function(listWidget) {\n\tthis.listWidget = listWidget;\n};\n\nPopStoryView.prototype.navigateTo = function(historyInfo) {\n\tvar listElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Scroll the node into view\n\tthis.listWidget.dispatchEvent({type: \"tm-scroll\", target: targetElement});\n};\n\nPopStoryView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Reset once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(targetElement,[\n\t\t\t{transition: \"none\"},\n\t\t\t{transform: \"none\"}\n\t\t]);\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"scale(2)\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t// Transition to the final position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{transform: \"scale(1)\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n};\n\nPopStoryView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\tif(targetElement.parentNode) {\n\t\t\t\twidget.removeChildDomNodes();\n\t\t\t}\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Remove the element at the end of the transition\n\tsetTimeout(removeElement,duration);\n\t// Animate the closure\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"scale(1)\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{transform: \"scale(0.1)\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n};\n\nexports.pop = PopStoryView;\n\n})();\n",
            "title": "$:/core/modules/storyviews/pop.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/storyviews/zoomin.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/zoomin.js\ntype: application/javascript\nmodule-type: storyview\n\nZooms between individual tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar easing = \"cubic-bezier(0.645, 0.045, 0.355, 1)\"; // From http://easings.net/#easeInOutCubic\n\nvar ZoominListView = function(listWidget) {\n\tvar self = this;\n\tthis.listWidget = listWidget;\n\t// Get the index of the tiddler that is at the top of the history\n\tvar history = this.listWidget.wiki.getTiddlerDataCached(this.listWidget.historyTitle,[]),\n\t\ttargetTiddler;\n\tif(history.length > 0) {\n\t\ttargetTiddler = history[history.length-1].title;\n\t}\n\t// Make all the tiddlers position absolute, and hide all but the top (or first) one\n\t$tw.utils.each(this.listWidget.children,function(itemWidget,index) {\n\t\tvar domNode = itemWidget.findFirstDomNode();\n\t\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\t\tif(!(domNode instanceof Element)) {\n\t\t\treturn;\n\t\t}\n\t\tif((targetTiddler && targetTiddler !== itemWidget.parseTreeNode.itemTitle) || (!targetTiddler && index)) {\n\t\t\tdomNode.style.display = \"none\";\n\t\t} else {\n\t\t\tself.currentTiddlerDomNode = domNode;\n\t\t}\n\t\t$tw.utils.addClass(domNode,\"tc-storyview-zoomin-tiddler\");\n\t});\n};\n\nZoominListView.prototype.navigateTo = function(historyInfo) {\n\tvar duration = $tw.utils.getAnimationDuration(),\n\t\tlistElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Make the new tiddler be position absolute and visible so that we can measure it\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"block\"},\n\t\t{transformOrigin: \"0 0\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{transition: \"none\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t// Get the position of the source node, or use the centre of the window as the source position\n\tvar sourceBounds = historyInfo.fromPageRect || {\n\t\t\tleft: window.innerWidth/2 - 2,\n\t\t\ttop: window.innerHeight/2 - 2,\n\t\t\twidth: window.innerWidth/8,\n\t\t\theight: window.innerHeight/8\n\t\t};\n\t// Try to find the title node in the target tiddler\n\tvar titleDomNode = findTitleDomNode(listItemWidget) || listItemWidget.findFirstDomNode(),\n\t\tzoomBounds = titleDomNode.getBoundingClientRect();\n\t// Compute the transform for the target tiddler to make the title lie over the source rectange\n\tvar targetBounds = targetElement.getBoundingClientRect(),\n\t\tscale = sourceBounds.width / zoomBounds.width,\n\t\tx = sourceBounds.left - targetBounds.left - (zoomBounds.left - targetBounds.left) * scale,\n\t\ty = sourceBounds.top - targetBounds.top - (zoomBounds.top - targetBounds.top) * scale;\n\t// Transform the target tiddler to its starting position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transform: \"translateX(\" + x + \"px) translateY(\" + y + \"px) scale(\" + scale + \")\"}\n\t]);\n\t// Force layout\n\t$tw.utils.forceLayout(targetElement);\n\t// Apply the ending transitions with a timeout to ensure that the previously applied transformations are 
applied first\n\tvar self = this,\n\t\tprevCurrentTiddler = this.currentTiddlerDomNode;\n\tthis.currentTiddlerDomNode = targetElement;\n\t// Transform the target tiddler to its natural size\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t{opacity: \"1.0\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{zIndex: \"500\"},\n\t]);\n\t// Transform the previous tiddler out of the way and then hide it\n\tif(prevCurrentTiddler && prevCurrentTiddler !== targetElement) {\n\t\tscale = zoomBounds.width / sourceBounds.width;\n\t\tx =  zoomBounds.left - targetBounds.left - (sourceBounds.left - targetBounds.left) * scale;\n\t\ty =  zoomBounds.top - targetBounds.top - (sourceBounds.top - targetBounds.top) * scale;\n\t\t$tw.utils.setStyle(prevCurrentTiddler,[\n\t\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t\t{opacity: \"0.0\"},\n\t\t\t{transformOrigin: \"0 0\"},\n\t\t\t{transform: \"translateX(\" + x + \"px) translateY(\" + y + \"px) scale(\" + scale + \")\"},\n\t\t\t{zIndex: \"0\"}\n\t\t]);\n\t\t// Hide the tiddler when the transition has finished\n\t\tsetTimeout(function() {\n\t\t\tif(self.currentTiddlerDomNode !== prevCurrentTiddler) {\n\t\t\t\tprevCurrentTiddler.style.display = \"none\";\n\t\t\t}\n\t\t},duration);\n\t}\n\t// Scroll the target into view\n//\t$tw.pageScroller.scrollIntoView(targetElement);\n};\n\n/*\nFind the first child DOM node of a widget that has the class \"tc-title\"\n*/\nfunction findTitleDomNode(widget,targetClass) {\n\ttargetClass = targetClass || \"tc-title\";\n\tvar domNode = widget.findFirstDomNode();\n\tif(domNode && domNode.querySelector) {\n\t\treturn domNode.querySelector(\".\" + targetClass);\n\t}\n\treturn null;\n}\n\nZoominListView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Make the newly inserted node position absolute and hidden\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"none\"}\n\t]);\n};\n\nZoominListView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\twidget.removeChildDomNodes();\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Abandon if hidden\n\tif(targetElement.style.display != \"block\" ) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Set up the tiddler that is being closed\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"block\"},\n\t\t{transformOrigin: \"50% 50%\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{transition: \"none\"},\n\t\t{zIndex: \"0\"}\n\t]);\n\t// We'll move back to the previous or next element in the story\n\tvar toWidget = widget.previousSibling();\n\tif(!toWidget) {\n\t\ttoWidget = widget.nextSibling();\n\t}\n\tvar toWidgetDomNode = toWidget && toWidget.findFirstDomNode();\n\t// Set up the tiddler we're moving back 
in\n\tif(toWidgetDomNode) {\n\t\t$tw.utils.addClass(toWidgetDomNode,\"tc-storyview-zoomin-tiddler\");\n\t\t$tw.utils.setStyle(toWidgetDomNode,[\n\t\t\t{display: \"block\"},\n\t\t\t{transformOrigin: \"50% 50%\"},\n\t\t\t{transform: \"translateX(0px) translateY(0px) scale(10)\"},\n\t\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t\t{opacity: \"0\"},\n\t\t\t{zIndex: \"500\"}\n\t\t]);\n\t\tthis.currentTiddlerDomNode = toWidgetDomNode;\n\t}\n\t// Animate them both\n\t// Force layout\n\t$tw.utils.forceLayout(this.listWidget.parentDomNode);\n\t// First, the tiddler we're closing\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transformOrigin: \"50% 50%\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(0.1)\"},\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t{opacity: \"0\"},\n\t\t{zIndex: \"0\"}\n\t]);\n\tsetTimeout(removeElement,duration);\n\t// Now the tiddler we're going back to\n\tif(toWidgetDomNode) {\n\t\t$tw.utils.setStyle(toWidgetDomNode,[\n\t\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t\t{opacity: \"1\"}\n\t\t]);\n\t}\n\treturn true; // Indicate that we'll delete the DOM node\n};\n\nexports.zoomin = ZoominListView;\n\n})();\n",
            "title": "$:/core/modules/storyviews/zoomin.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/syncer.js": {
            "text": "/*\\\ntitle: $:/core/modules/syncer.js\ntype: application/javascript\nmodule-type: global\n\nThe syncer tracks changes to the store. If a syncadaptor is used then individual tiddlers are synchronised through it. If there is no syncadaptor then the entire wiki is saved via saver modules.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInstantiate the syncer with the following options:\nsyncadaptor: reference to syncadaptor to be used\nwiki: wiki to be synced\n*/\nfunction Syncer(options) {\n\tvar self = this;\n\tthis.wiki = options.wiki;\n\tthis.syncadaptor = options.syncadaptor;\n\t// Make a logger\n\tthis.logger = new $tw.utils.Logger(\"syncer\" + ($tw.browser ? \"-browser\" : \"\") + ($tw.node ? \"-server\" : \"\"));\n\t// Compile the dirty tiddler filter\n\tthis.filterFn = this.wiki.compileFilter(this.wiki.getTiddlerText(this.titleSyncFilter));\n\t// Record information for known tiddlers\n\tthis.readTiddlerInfo();\n\t// Tasks are {type: \"load\"/\"save\"/\"delete\", title:, queueTime:, lastModificationTime:}\n\tthis.taskQueue = {}; // Hashmap of tasks yet to be performed\n\tthis.taskInProgress = {}; // Hash of tasks in progress\n\tthis.taskTimerId = null; // Timer for task dispatch\n\tthis.pollTimerId = null; // Timer for polling server\n\t// Listen out for changes to tiddlers\n\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\tself.syncToServer(changes);\n\t});\n\t// Browser event handlers\n\tif($tw.browser) {\n\t\t// Set up our beforeunload handler\n\t\t$tw.addUnloadTask(function(event) {\n\t\t\tvar confirmationMessage;\n\t\t\tif(self.isDirty()) {\n\t\t\t\tconfirmationMessage = $tw.language.getString(\"UnsavedChangesWarning\");\n\t\t\t\tevent.returnValue = confirmationMessage; // Gecko\n\t\t\t}\n\t\t\treturn confirmationMessage;\n\t\t});\n\t\t// Listen out for login/logout/refresh events in the browser\n\t\t$tw.rootWidget.addEventListener(\"tm-login\",function() {\n\t\t\tself.handleLoginEvent();\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-logout\",function() {\n\t\t\tself.handleLogoutEvent();\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-server-refresh\",function() {\n\t\t\tself.handleRefreshEvent();\n\t\t});\n\t}\n\t// Listen out for lazyLoad events\n\tthis.wiki.addEventListener(\"lazyLoad\",function(title) {\n\t\tself.handleLazyLoadEvent(title);\n\t});\n\t// Get the login status\n\tthis.getStatus(function(err,isLoggedIn) {\n\t\t// Do a sync from the server\n\t\tself.syncFromServer();\n\t});\n}\n\n/*\nConstants\n*/\nSyncer.prototype.titleIsLoggedIn = \"$:/status/IsLoggedIn\";\nSyncer.prototype.titleUserName = \"$:/status/UserName\";\nSyncer.prototype.titleSyncFilter = \"$:/config/SyncFilter\";\nSyncer.prototype.titleSavedNotification = \"$:/language/Notifications/Save/Done\";\nSyncer.prototype.taskTimerInterval = 1 * 1000; // Interval for sync timer\nSyncer.prototype.throttleInterval = 1 * 1000; // Defer saving tiddlers if they've changed in the last 1s...\nSyncer.prototype.fallbackInterval = 10 * 1000; // Unless the task is older than 10s\nSyncer.prototype.pollTimerInterval = 60 * 1000; // Interval for polling for changes from the adaptor\n\n\n/*\nRead (or re-read) the latest tiddler info from the store\n*/\nSyncer.prototype.readTiddlerInfo = function() {\n\t// Hashmap by title of {revision:,changeCount:,adaptorInfo:}\n\tthis.tiddlerInfo = {};\n\t// Record information for known tiddlers\n\tvar self = this,\n\t\ttiddlers = 
this.filterFn.call(this.wiki);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\tself.tiddlerInfo[title] = {\n\t\t\trevision: tiddler.fields.revision,\n\t\t\tadaptorInfo: self.syncadaptor && self.syncadaptor.getTiddlerInfo(tiddler),\n\t\t\tchangeCount: self.wiki.getChangeCount(title),\n\t\t\thasBeenLazyLoaded: false\n\t\t};\n\t});\n};\n\n/*\nCreate an tiddlerInfo structure if it doesn't already exist\n*/\nSyncer.prototype.createTiddlerInfo = function(title) {\n\tif(!$tw.utils.hop(this.tiddlerInfo,title)) {\n\t\tthis.tiddlerInfo[title] = {\n\t\t\trevision: null,\n\t\t\tadaptorInfo: {},\n\t\t\tchangeCount: -1,\n\t\t\thasBeenLazyLoaded: false\n\t\t};\n\t}\n};\n\n/*\nChecks whether the wiki is dirty (ie the window shouldn't be closed)\n*/\nSyncer.prototype.isDirty = function() {\n\treturn (this.numTasksInQueue() > 0) || (this.numTasksInProgress() > 0);\n};\n\n/*\nUpdate the document body with the class \"tc-dirty\" if the wiki has unsaved/unsynced changes\n*/\nSyncer.prototype.updateDirtyStatus = function() {\n\tif($tw.browser) {\n\t\t$tw.utils.toggleClass(document.body,\"tc-dirty\",this.isDirty());\n\t}\n};\n\n/*\nSave an incoming tiddler in the store, and updates the associated tiddlerInfo\n*/\nSyncer.prototype.storeTiddler = function(tiddlerFields) {\n\t// Save the tiddler\n\tvar tiddler = new $tw.Tiddler(this.wiki.getTiddler(tiddlerFields.title),tiddlerFields);\n\tthis.wiki.addTiddler(tiddler);\n\t// Save the tiddler revision and changeCount details\n\tthis.tiddlerInfo[tiddlerFields.title] = {\n\t\trevision: tiddlerFields.revision,\n\t\tadaptorInfo: this.syncadaptor.getTiddlerInfo(tiddler),\n\t\tchangeCount: this.wiki.getChangeCount(tiddlerFields.title),\n\t\thasBeenLazyLoaded: true\n\t};\n};\n\nSyncer.prototype.getStatus = function(callback) {\n\tvar self = this;\n\t// Check if the adaptor supports getStatus()\n\tif(this.syncadaptor && this.syncadaptor.getStatus) {\n\t\t// Mark us as not logged in\n\t\tthis.wiki.addTiddler({title: this.titleIsLoggedIn,text: \"no\"});\n\t\t// Get login status\n\t\tthis.syncadaptor.getStatus(function(err,isLoggedIn,username) {\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert(err);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Set the various status tiddlers\n\t\t\tself.wiki.addTiddler({title: self.titleIsLoggedIn,text: isLoggedIn ? 
\"yes\" : \"no\"});\n\t\t\tif(isLoggedIn) {\n\t\t\t\tself.wiki.addTiddler({title: self.titleUserName,text: username || \"\"});\n\t\t\t} else {\n\t\t\t\tself.wiki.deleteTiddler(self.titleUserName);\n\t\t\t}\n\t\t\t// Invoke the callback\n\t\t\tif(callback) {\n\t\t\t\tcallback(err,isLoggedIn,username);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tcallback(null,true,\"UNAUTHENTICATED\");\n\t}\n};\n\n/*\nSynchronise from the server by reading the skinny tiddler list and queuing up loads for any tiddlers that we don't already have up to date\n*/\nSyncer.prototype.syncFromServer = function() {\n\tif(this.syncadaptor && this.syncadaptor.getSkinnyTiddlers) {\n\t\tthis.logger.log(\"Retrieving skinny tiddler list\");\n\t\tvar self = this;\n\t\tif(this.pollTimerId) {\n\t\t\tclearTimeout(this.pollTimerId);\n\t\t\tthis.pollTimerId = null;\n\t\t}\n\t\tthis.syncadaptor.getSkinnyTiddlers(function(err,tiddlers) {\n\t\t\t// Trigger the next sync\n\t\t\tself.pollTimerId = setTimeout(function() {\n\t\t\t\tself.pollTimerId = null;\n\t\t\t\tself.syncFromServer.call(self);\n\t\t\t},self.pollTimerInterval);\n\t\t\t// Check for errors\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert($tw.language.getString(\"Error/RetrievingSkinny\") + \":\",err);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Process each incoming tiddler\n\t\t\tfor(var t=0; t<tiddlers.length; t++) {\n\t\t\t\t// Get the incoming tiddler fields, and the existing tiddler\n\t\t\t\tvar tiddlerFields = tiddlers[t],\n\t\t\t\t\tincomingRevision = tiddlerFields.revision + \"\",\n\t\t\t\t\ttiddler = self.wiki.getTiddler(tiddlerFields.title),\n\t\t\t\t\ttiddlerInfo = self.tiddlerInfo[tiddlerFields.title],\n\t\t\t\t\tcurrRevision = tiddlerInfo ? tiddlerInfo.revision : null;\n\t\t\t\t// Ignore the incoming tiddler if it's the same as the revision we've already got\n\t\t\t\tif(currRevision !== incomingRevision) {\n\t\t\t\t\t// Do a full load if we've already got a fat version of the tiddler\n\t\t\t\t\tif(tiddler && tiddler.fields.text !== undefined) {\n\t\t\t\t\t\t// Do a full load of this tiddler\n\t\t\t\t\t\tself.enqueueSyncTask({\n\t\t\t\t\t\t\ttype: \"load\",\n\t\t\t\t\t\t\ttitle: tiddlerFields.title\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Load the skinny version of the tiddler\n\t\t\t\t\t\tself.storeTiddler(tiddlerFields);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nSynchronise a set of changes to the server\n*/\nSyncer.prototype.syncToServer = function(changes) {\n\tvar self = this,\n\t\tnow = Date.now(),\n\t\tfilteredChanges = this.filterFn.call(this.wiki,function(callback) {\n\t\t\t$tw.utils.each(changes,function(change,title) {\n\t\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\t\tcallback(tiddler,title);\n\t\t\t});\n\t\t});\n\t$tw.utils.each(changes,function(change,title,object) {\n\t\t// Process the change if it is a deletion of a tiddler we're already syncing, or is on the filtered change list\n\t\tif((change.deleted && $tw.utils.hop(self.tiddlerInfo,title)) || filteredChanges.indexOf(title) !== -1) {\n\t\t\t// Queue a task to sync this tiddler\n\t\t\tself.enqueueSyncTask({\n\t\t\t\ttype: change.deleted ? 
\"delete\" : \"save\",\n\t\t\t\ttitle: title\n\t\t\t});\n\t\t}\n\t});\n};\n\n/*\nLazily load a skinny tiddler if we can\n*/\nSyncer.prototype.handleLazyLoadEvent = function(title) {\n\t// Don't lazy load the same tiddler twice\n\tvar info = this.tiddlerInfo[title];\n\tif(!info || !info.hasBeenLazyLoaded) {\n\t\tthis.createTiddlerInfo(title);\n\t\tthis.tiddlerInfo[title].hasBeenLazyLoaded = true;\n\t\t// Queue up a sync task to load this tiddler\n\t\tthis.enqueueSyncTask({\n\t\t\ttype: \"load\",\n\t\t\ttitle: title\n\t\t});\t\t\n\t}\n};\n\n/*\nDispay a password prompt and allow the user to login\n*/\nSyncer.prototype.handleLoginEvent = function() {\n\tvar self = this;\n\tthis.getStatus(function(err,isLoggedIn,username) {\n\t\tif(!isLoggedIn) {\n\t\t\t$tw.passwordPrompt.createPrompt({\n\t\t\t\tserviceName: $tw.language.getString(\"LoginToTiddlySpace\"),\n\t\t\t\tcallback: function(data) {\n\t\t\t\t\tself.login(data.username,data.password,function(err,isLoggedIn) {\n\t\t\t\t\t\tself.syncFromServer();\n\t\t\t\t\t});\n\t\t\t\t\treturn true; // Get rid of the password prompt\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n};\n\n/*\nAttempt to login to TiddlyWeb.\n\tusername: username\n\tpassword: password\n\tcallback: invoked with arguments (err,isLoggedIn)\n*/\nSyncer.prototype.login = function(username,password,callback) {\n\tthis.logger.log(\"Attempting to login as\",username);\n\tvar self = this;\n\tif(this.syncadaptor.login) {\n\t\tthis.syncadaptor.login(username,password,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tself.getStatus(function(err,isLoggedIn,username) {\n\t\t\t\tif(callback) {\n\t\t\t\t\tcallback(null,isLoggedIn);\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t} else {\n\t\tcallback(null,true);\n\t}\n};\n\n/*\nAttempt to log out of TiddlyWeb\n*/\nSyncer.prototype.handleLogoutEvent = function() {\n\tthis.logger.log(\"Attempting to logout\");\n\tvar self = this;\n\tif(this.syncadaptor.logout) {\n\t\tthis.syncadaptor.logout(function(err) {\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert(err);\n\t\t\t} else {\n\t\t\t\tself.getStatus();\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nImmediately refresh from the server\n*/\nSyncer.prototype.handleRefreshEvent = function() {\n\tthis.syncFromServer();\n};\n\n/*\nQueue up a sync task. If there is already a pending task for the tiddler, just update the last modification time\n*/\nSyncer.prototype.enqueueSyncTask = function(task) {\n\tvar self = this,\n\t\tnow = Date.now();\n\t// Set the timestamps on this task\n\ttask.queueTime = now;\n\ttask.lastModificationTime = now;\n\t// Fill in some tiddlerInfo if the tiddler is one we haven't seen before\n\tthis.createTiddlerInfo(task.title);\n\t// Bail if this is a save and the tiddler is already at the changeCount that the server has\n\tif(task.type === \"save\" && this.wiki.getChangeCount(task.title) <= this.tiddlerInfo[task.title].changeCount) {\n\t\treturn;\n\t}\n\t// Check if this tiddler is already in the queue\n\tif($tw.utils.hop(this.taskQueue,task.title)) {\n\t\t// this.logger.log(\"Re-queueing up sync task with type:\",task.type,\"title:\",task.title);\n\t\tvar existingTask = this.taskQueue[task.title];\n\t\t// If so, just update the last modification time\n\t\texistingTask.lastModificationTime = task.lastModificationTime;\n\t\t// If the new task is a save then we upgrade the existing task to a save. Thus a pending load is turned into a save if the tiddler changes locally in the meantime. 
But a pending save is not modified to become a load\n\t\tif(task.type === \"save\" || task.type === \"delete\") {\n\t\t\texistingTask.type = task.type;\n\t\t}\n\t} else {\n\t\t// this.logger.log(\"Queuing up sync task with type:\",task.type,\"title:\",task.title);\n\t\t// If it is not in the queue, insert it\n\t\tthis.taskQueue[task.title] = task;\n\t\tthis.updateDirtyStatus();\n\t}\n\t// Process the queue\n\t$tw.utils.nextTick(function() {self.processTaskQueue.call(self);});\n};\n\n/*\nReturn the number of tasks in progress\n*/\nSyncer.prototype.numTasksInProgress = function() {\n\treturn $tw.utils.count(this.taskInProgress);\n};\n\n/*\nReturn the number of tasks in the queue\n*/\nSyncer.prototype.numTasksInQueue = function() {\n\treturn $tw.utils.count(this.taskQueue);\n};\n\n/*\nTrigger a timeout if one isn't already outstanding\n*/\nSyncer.prototype.triggerTimeout = function() {\n\tvar self = this;\n\tif(!this.taskTimerId) {\n\t\tthis.taskTimerId = setTimeout(function() {\n\t\t\tself.taskTimerId = null;\n\t\t\tself.processTaskQueue.call(self);\n\t\t},self.taskTimerInterval);\n\t}\n};\n\n/*\nProcess the task queue, performing the next task if appropriate\n*/\nSyncer.prototype.processTaskQueue = function() {\n\tvar self = this;\n\t// Only process a task if the sync adaptor is fully initialised and we're not already performing a task. If we are already performing a task then we'll dispatch the next one when it completes\n\tif(this.syncadaptor.isReady() && this.numTasksInProgress() === 0) {\n\t\t// Choose the next task to perform\n\t\tvar task = this.chooseNextTask();\n\t\t// Perform the task if we had one\n\t\tif(task) {\n\t\t\t// Remove the task from the queue and add it to the in progress list\n\t\t\tdelete this.taskQueue[task.title];\n\t\t\tthis.taskInProgress[task.title] = task;\n\t\t\tthis.updateDirtyStatus();\n\t\t\t// Dispatch the task\n\t\t\tthis.dispatchTask(task,function(err) {\n\t\t\t\tif(err) {\n\t\t\t\t\tself.logger.alert(\"Sync error while processing '\" + task.title + \"':\\n\" + err);\n\t\t\t\t}\n\t\t\t\t// Mark that this task is no longer in progress\n\t\t\t\tdelete self.taskInProgress[task.title];\n\t\t\t\tself.updateDirtyStatus();\n\t\t\t\t// Process the next task\n\t\t\t\tself.processTaskQueue.call(self);\n\t\t\t});\n\t\t} else {\n\t\t\t// Make sure we've set a time if there wasn't a task to perform, but we've still got tasks in the queue\n\t\t\tif(this.numTasksInQueue() > 0) {\n\t\t\t\tthis.triggerTimeout();\n\t\t\t}\n\t\t}\n\t}\n};\n\n/*\nChoose the next applicable task\n*/\nSyncer.prototype.chooseNextTask = function() {\n\tvar self = this,\n\t\tcandidateTask = null,\n\t\tnow = Date.now();\n\t// Select the best candidate task\n\t$tw.utils.each(this.taskQueue,function(task,title) {\n\t\t// Exclude the task if there's one of the same name in progress\n\t\tif($tw.utils.hop(self.taskInProgress,title)) {\n\t\t\treturn;\n\t\t}\n\t\t// Exclude the task if it is a save and the tiddler has been modified recently, but not hit the fallback time\n\t\tif(task.type === \"save\" && (now - task.lastModificationTime) < self.throttleInterval &&\n\t\t\t(now - task.queueTime) < self.fallbackInterval) {\n\t\t\treturn;\n\t\t}\n\t\t// Exclude the task if it is newer than the current best candidate\n\t\tif(candidateTask && candidateTask.queueTime < task.queueTime) {\n\t\t\treturn;\n\t\t}\n\t\t// Now this is our best candidate\n\t\tcandidateTask = task;\n\t});\n\treturn candidateTask;\n};\n\n/*\nDispatch a task and invoke the callback\n*/\nSyncer.prototype.dispatchTask = 
function(task,callback) {\n\tvar self = this;\n\tif(task.type === \"save\") {\n\t\tvar changeCount = this.wiki.getChangeCount(task.title),\n\t\t\ttiddler = this.wiki.getTiddler(task.title);\n\t\tthis.logger.log(\"Dispatching 'save' task:\",task.title);\n\t\tif(tiddler) {\n\t\t\tthis.syncadaptor.saveTiddler(tiddler,function(err,adaptorInfo,revision) {\n\t\t\t\tif(err) {\n\t\t\t\t\treturn callback(err);\n\t\t\t\t}\n\t\t\t\t// Adjust the info stored about this tiddler\n\t\t\t\tself.tiddlerInfo[task.title] = {\n\t\t\t\t\tchangeCount: changeCount,\n\t\t\t\t\tadaptorInfo: adaptorInfo,\n\t\t\t\t\trevision: revision\n\t\t\t\t};\n\t\t\t\t// Invoke the callback\n\t\t\t\tcallback(null);\n\t\t\t},{\n\t\t\t\ttiddlerInfo: self.tiddlerInfo[task.title]\n\t\t\t});\n\t\t} else {\n\t\t\tthis.logger.log(\" Not Dispatching 'save' task:\",task.title,\"tiddler does not exist\");\n\t\t\treturn callback(null);\n\t\t}\n\t} else if(task.type === \"load\") {\n\t\t// Load the tiddler\n\t\tthis.logger.log(\"Dispatching 'load' task:\",task.title);\n\t\tthis.syncadaptor.loadTiddler(task.title,function(err,tiddlerFields) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\t// Store the tiddler\n\t\t\tif(tiddlerFields) {\n\t\t\t\tself.storeTiddler(tiddlerFields);\n\t\t\t}\n\t\t\t// Invoke the callback\n\t\t\tcallback(null);\n\t\t});\n\t} else if(task.type === \"delete\") {\n\t\t// Delete the tiddler\n\t\tthis.logger.log(\"Dispatching 'delete' task:\",task.title);\n\t\tthis.syncadaptor.deleteTiddler(task.title,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tdelete self.tiddlerInfo[task.title];\n\t\t\t// Invoke the callback\n\t\t\tcallback(null);\n\t\t},{\n\t\t\ttiddlerInfo: self.tiddlerInfo[task.title]\n\t\t});\n\t}\n};\n\nexports.Syncer = Syncer;\n\n})();\n",
            "title": "$:/core/modules/syncer.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/tiddler.js\ntype: application/javascript\nmodule-type: tiddlermethod\n\nExtension methods for the $tw.Tiddler object (constructor and methods required at boot time are in boot/boot.js)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.hasTag = function(tag) {\n\treturn this.fields.tags && this.fields.tags.indexOf(tag) !== -1;\n};\n\nexports.isPlugin = function() {\n\treturn this.fields.type === \"application/json\" && this.hasField(\"plugin-type\");\n};\n\nexports.isDraft = function() {\n\treturn this.hasField(\"draft.of\");\n};\n\nexports.getFieldString = function(field) {\n\tvar value = this.fields[field];\n\t// Check for a missing field\n\tif(value === undefined || value === null) {\n\t\treturn \"\";\n\t}\n\t// Parse the field with the associated module (if any)\n\tvar fieldModule = $tw.Tiddler.fieldModules[field];\n\tif(fieldModule && fieldModule.stringify) {\n\t\treturn fieldModule.stringify.call(this,value);\n\t} else {\n\t\treturn value.toString();\n\t}\n};\n\n/*\nGet all the fields as a name:value block. Options:\n\texclude: an array of field names to exclude\n*/\nexports.getFieldStringBlock = function(options) {\n\toptions = options || {};\n\tvar exclude = options.exclude || [];\n\tvar fields = [];\n\tfor(var field in this.fields) {\n\t\tif($tw.utils.hop(this.fields,field)) {\n\t\t\tif(exclude.indexOf(field) === -1) {\n\t\t\t\tfields.push(field + \": \" + this.getFieldString(field));\n\t\t\t}\n\t\t}\n\t}\n\treturn fields.join(\"\\n\");\n};\n\n/*\nCompare two tiddlers for equality\ntiddler: the tiddler to compare\nexcludeFields: array of field names to exclude from the comparison\n*/\nexports.isEqual = function(tiddler,excludeFields) {\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\treturn false;\n\t}\n\texcludeFields = excludeFields || [];\n\tvar self = this,\n\t\tdifferences = []; // Fields that have differences\n\t// Add to the differences array\n\tfunction addDifference(fieldName) {\n\t\t// Check for this field being excluded\n\t\tif(excludeFields.indexOf(fieldName) === -1) {\n\t\t\t// Save the field as a difference\n\t\t\t$tw.utils.pushTop(differences,fieldName);\n\t\t}\n\t}\n\t// Returns true if the two values of this field are equal\n\tfunction isFieldValueEqual(fieldName) {\n\t\tvar valueA = self.fields[fieldName],\n\t\t\tvalueB = tiddler.fields[fieldName];\n\t\t// Check for identical string values\n\t\tif(typeof(valueA) === \"string\" && typeof(valueB) === \"string\" && valueA === valueB) {\n\t\t\treturn true;\n\t\t}\n\t\t// Check for identical array values\n\t\tif($tw.utils.isArray(valueA) && $tw.utils.isArray(valueB) && $tw.utils.isArrayEqual(valueA,valueB)) {\n\t\t\treturn true;\n\t\t}\n\t\t// Otherwise the fields must be different\n\t\treturn false;\n\t}\n\t// Compare our fields\n\tfor(var fieldName in this.fields) {\n\t\tif(!isFieldValueEqual(fieldName)) {\n\t\t\taddDifference(fieldName);\n\t\t}\n\t}\n\t// There's a difference for every field in the other tiddler that we don't have\n\tfor(fieldName in tiddler.fields) {\n\t\tif(!(fieldName in this.fields)) {\n\t\t\taddDifference(fieldName);\n\t\t}\n\t}\n\t// Return whether there were any differences\n\treturn differences.length === 0;\n};\n\n})();\n",
            "title": "$:/core/modules/tiddler.js",
            "type": "application/javascript",
            "module-type": "tiddlermethod"
        },
        "$:/core/modules/upgraders/plugins.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/plugins.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that checks that plugins are newer than any already installed version\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar UPGRADE_LIBRARY_TITLE = \"$:/UpgradeLibrary\";\n\nvar BLOCKED_PLUGINS = {\n\t\"$:/themes/tiddlywiki/stickytitles\": {\n\t\tversions: [\"*\"]\n\t},\n\t\"$:/plugins/tiddlywiki/fullscreen\": {\n\t\tversions: [\"*\"]\n\t}\n};\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {},\n\t\tupgradeLibrary,\n\t\tgetLibraryTiddler = function(title) {\n\t\t\tif(!upgradeLibrary) {\n\t\t\t\tupgradeLibrary = wiki.getTiddlerData(UPGRADE_LIBRARY_TITLE,{});\n\t\t\t\tupgradeLibrary.tiddlers = upgradeLibrary.tiddlers || {};\n\t\t\t}\n\t\t\treturn upgradeLibrary.tiddlers[title];\n\t\t};\n\n\t// Go through all the incoming tiddlers\n\t$tw.utils.each(titles,function(title) {\n\t\tvar incomingTiddler = tiddlers[title];\n\t\t// Check if we're dealing with a plugin\n\t\tif(incomingTiddler && incomingTiddler[\"plugin-type\"] && incomingTiddler.version) {\n\t\t\t// Upgrade the incoming plugin if it is in the upgrade library\n\t\t\tvar libraryTiddler = getLibraryTiddler(title);\n\t\t\tif(libraryTiddler && libraryTiddler[\"plugin-type\"] && libraryTiddler.version) {\n\t\t\t\ttiddlers[title] = libraryTiddler;\n\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Upgraded\",{variables: {incoming: incomingTiddler.version, upgraded: libraryTiddler.version}});\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Suppress the incoming plugin if it is older than the currently installed one\n\t\t\tvar existingTiddler = wiki.getTiddler(title);\n\t\t\tif(existingTiddler && existingTiddler.hasField(\"plugin-type\") && existingTiddler.hasField(\"version\")) {\n\t\t\t\t// Reject the incoming plugin by blanking all its fields\n\t\t\t\tif($tw.utils.checkVersions(existingTiddler.fields.version,incomingTiddler.version)) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Suppressed/Version\",{variables: {incoming: incomingTiddler.version, existing: existingTiddler.fields.version}});\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(incomingTiddler && incomingTiddler[\"plugin-type\"]) {\n\t\t\t// Check whether the plugin is on the blocked list\n\t\t\tvar blockInfo = BLOCKED_PLUGINS[title];\n\t\t\tif(blockInfo) {\n\t\t\t\tif(blockInfo.versions.indexOf(\"*\") !== -1 || (incomingTiddler.version && blockInfo.versions.indexOf(incomingTiddler.version) !== -1)) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Suppressed/Incompatible\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/plugins.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/upgraders/system.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/system.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that suppresses certain system tiddlers that shouldn't be imported\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar DONT_IMPORT_LIST = [\"$:/StoryList\",\"$:/HistoryList\"],\n\tDONT_IMPORT_PREFIX_LIST = [\"$:/temp/\",\"$:/state/\"];\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {};\n\t// Check for tiddlers on our list\n\t$tw.utils.each(titles,function(title) {\n\t\tif(DONT_IMPORT_LIST.indexOf(title) !== -1) {\n\t\t\ttiddlers[title] = Object.create(null);\n\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/System/Suppressed\");\n\t\t} else {\n\t\t\tfor(var t=0; t<DONT_IMPORT_PREFIX_LIST.length; t++) {\n\t\t\t\tvar prefix = DONT_IMPORT_PREFIX_LIST[t];\n\t\t\t\tif(title.substr(0,prefix.length) === prefix) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/State/Suppressed\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/system.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/upgraders/themetweaks.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/themetweaks.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that handles the change in theme tweak storage introduced in 5.0.14-beta.\n\nPreviously, theme tweaks were stored in two data tiddlers:\n\n* $:/themes/tiddlywiki/vanilla/metrics\n* $:/themes/tiddlywiki/vanilla/settings\n\nNow, each tweak is stored in its own separate tiddler.\n\nThis upgrader copies any values from the old format to the new. The old data tiddlers are not deleted in case they have been used to store additional indexes.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar MAPPINGS = {\n\t\"$:/themes/tiddlywiki/vanilla/metrics\": {\n\t\t\"fontsize\": \"$:/themes/tiddlywiki/vanilla/metrics/fontsize\",\n\t\t\"lineheight\": \"$:/themes/tiddlywiki/vanilla/metrics/lineheight\",\n\t\t\"storyleft\": \"$:/themes/tiddlywiki/vanilla/metrics/storyleft\",\n\t\t\"storytop\": \"$:/themes/tiddlywiki/vanilla/metrics/storytop\",\n\t\t\"storyright\": \"$:/themes/tiddlywiki/vanilla/metrics/storyright\",\n\t\t\"storywidth\": \"$:/themes/tiddlywiki/vanilla/metrics/storywidth\",\n\t\t\"tiddlerwidth\": \"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\"\n\t},\n\t\"$:/themes/tiddlywiki/vanilla/settings\": {\n\t\t\"fontfamily\": \"$:/themes/tiddlywiki/vanilla/settings/fontfamily\"\n\t}\n};\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {};\n\t// Check for tiddlers on our list\n\t$tw.utils.each(titles,function(title) {\n\t\tvar mapping = MAPPINGS[title];\n\t\tif(mapping) {\n\t\t\tvar tiddler = new $tw.Tiddler(tiddlers[title]),\n\t\t\t\ttiddlerData = wiki.getTiddlerDataCached(tiddler,{});\n\t\t\tfor(var index in mapping) {\n\t\t\t\tvar mappedTitle = mapping[index];\n\t\t\t\tif(!tiddlers[mappedTitle] || tiddlers[mappedTitle].title !== mappedTitle) {\n\t\t\t\t\ttiddlers[mappedTitle] = {\n\t\t\t\t\t\ttitle: mappedTitle,\n\t\t\t\t\t\ttext: tiddlerData[index]\n\t\t\t\t\t};\n\t\t\t\t\tmessages[mappedTitle] = $tw.language.getString(\"Import/Upgrader/ThemeTweaks/Created\",{variables: {\n\t\t\t\t\t\tfrom: title + \"##\" + index\n\t\t\t\t\t}});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/themetweaks.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/utils/crypto.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/crypto.js\ntype: application/javascript\nmodule-type: utils\n\nUtility functions related to crypto.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nLook for an encrypted store area in the text of a TiddlyWiki file\n*/\nexports.extractEncryptedStoreArea = function(text) {\n\tvar encryptedStoreAreaStartMarker = \"<pre id=\\\"encryptedStoreArea\\\" type=\\\"text/plain\\\" style=\\\"display:none;\\\">\",\n\t\tencryptedStoreAreaStart = text.indexOf(encryptedStoreAreaStartMarker);\n\tif(encryptedStoreAreaStart !== -1) {\n\t\tvar encryptedStoreAreaEnd = text.indexOf(\"</pre>\",encryptedStoreAreaStart);\n\t\tif(encryptedStoreAreaEnd !== -1) {\n\t\t\treturn $tw.utils.htmlDecode(text.substring(encryptedStoreAreaStart + encryptedStoreAreaStartMarker.length,encryptedStoreAreaEnd-1));\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nAttempt to extract the tiddlers from an encrypted store area using the current password. If the password is not provided then the password in the password store will be used\n*/\nexports.decryptStoreArea = function(encryptedStoreArea,password) {\n\tvar decryptedText = $tw.crypto.decrypt(encryptedStoreArea,password);\n\tif(decryptedText) {\n\t\tvar json = JSON.parse(decryptedText),\n\t\t\ttiddlers = [];\n\t\tfor(var title in json) {\n\t\t\tif(title !== \"$:/isEncrypted\") {\n\t\t\t\ttiddlers.push(json[title]);\n\t\t\t}\n\t\t}\n\t\treturn tiddlers;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n\n/*\nAttempt to extract the tiddlers from an encrypted store area using the current password. If that fails, the user is prompted for a password.\nencryptedStoreArea: text of the TiddlyWiki encrypted store area\ncallback: function(tiddlers) called with the array of decrypted tiddlers\n\nThe following configuration settings are supported:\n\n$tw.config.usePasswordVault: causes any password entered by the user to also be put into the system password vault\n*/\nexports.decryptStoreAreaInteractive = function(encryptedStoreArea,callback,options) {\n\t// Try to decrypt with the current password\n\tvar tiddlers = $tw.utils.decryptStoreArea(encryptedStoreArea);\n\tif(tiddlers) {\n\t\tcallback(tiddlers);\n\t} else {\n\t\t// Prompt for a new password and keep trying\n\t\t$tw.passwordPrompt.createPrompt({\n\t\t\tserviceName: \"Enter a password to decrypt the imported TiddlyWiki\",\n\t\t\tnoUserName: true,\n\t\t\tcanCancel: true,\n\t\t\tsubmitText: \"Decrypt\",\n\t\t\tcallback: function(data) {\n\t\t\t\t// Exit if the user cancelled\n\t\t\t\tif(!data) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t// Attempt to decrypt the tiddlers\n\t\t\t\tvar tiddlers = $tw.utils.decryptStoreArea(encryptedStoreArea,data.password);\n\t\t\t\tif(tiddlers) {\n\t\t\t\t\tif($tw.config.usePasswordVault) {\n\t\t\t\t\t\t$tw.crypto.setPassword(data.password);\n\t\t\t\t\t}\n\t\t\t\t\tcallback(tiddlers);\n\t\t\t\t\t// Exit and remove the password prompt\n\t\t\t\t\treturn true;\n\t\t\t\t} else {\n\t\t\t\t\t// We didn't decrypt everything, so continue to prompt for password\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/crypto.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/animations/slide.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/animations/slide.js\ntype: application/javascript\nmodule-type: animation\n\nA simple slide animation that varies the height of the element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction slideOpen(domNode,options) {\n\toptions = options || {};\n\tvar duration = options.duration || $tw.utils.getAnimationDuration();\n\t// Get the current height of the domNode\n\tvar computedStyle = window.getComputedStyle(domNode),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrPaddingBottom = parseInt(computedStyle.paddingBottom,10),\n\t\tcurrPaddingTop = parseInt(computedStyle.paddingTop,10),\n\t\tcurrHeight = domNode.offsetHeight;\n\t// Reset the margin once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(domNode,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"},\n\t\t\t{marginTop: \"\"},\n\t\t\t{paddingBottom: \"\"},\n\t\t\t{paddingTop: \"\"},\n\t\t\t{height: \"auto\"},\n\t\t\t{opacity: \"\"}\n\t\t]);\n\t\tif(options.callback) {\n\t\t\toptions.callback();\n\t\t}\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"none\"},\n\t\t{marginTop: \"0px\"},\n\t\t{marginBottom: \"0px\"},\n\t\t{paddingTop: \"0px\"},\n\t\t{paddingBottom: \"0px\"},\n\t\t{height: \"0px\"},\n\t\t{opacity: \"0\"}\n\t]);\n\t$tw.utils.forceLayout(domNode);\n\t// Transition to the final position\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"margin-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"height \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{marginBottom: currMarginBottom + \"px\"},\n\t\t{marginTop: currMarginTop + \"px\"},\n\t\t{paddingBottom: currPaddingBottom + \"px\"},\n\t\t{paddingTop: currPaddingTop + \"px\"},\n\t\t{height: currHeight + \"px\"},\n\t\t{opacity: \"1\"}\n\t]);\n}\n\nfunction slideClosed(domNode,options) {\n\toptions = options || {};\n\tvar duration = options.duration || $tw.utils.getAnimationDuration(),\n\t\tcurrHeight = domNode.offsetHeight;\n\t// Clear the properties we've set when the animation is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(domNode,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"},\n\t\t\t{marginTop: \"\"},\n\t\t\t{paddingBottom: \"\"},\n\t\t\t{paddingTop: \"\"},\n\t\t\t{height: \"auto\"},\n\t\t\t{opacity: \"\"}\n\t\t]);\n\t\tif(options.callback) {\n\t\t\toptions.callback();\n\t\t}\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(domNode,[\n\t\t{height: currHeight + \"px\"},\n\t\t{opacity: \"1\"}\n\t]);\n\t$tw.utils.forceLayout(domNode);\n\t// Transition to the final position\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"margin-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"height \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{marginTop: \"0px\"},\n\t\t{marginBottom: \"0px\"},\n\t\t{paddingTop: 
\"0px\"},\n\t\t{paddingBottom: \"0px\"},\n\t\t{height: \"0px\"},\n\t\t{opacity: \"0\"}\n\t]);\n}\n\nexports.slide = {\n\topen: slideOpen,\n\tclose: slideClosed\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/animations/slide.js",
            "type": "application/javascript",
            "module-type": "animation"
        },
        "$:/core/modules/utils/dom/animator.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/animator.js\ntype: application/javascript\nmodule-type: utils\n\nOrchestrates animations and transitions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction Animator() {\n\t// Get the registered animation modules\n\tthis.animations = {};\n\t$tw.modules.applyMethods(\"animation\",this.animations);\n}\n\nAnimator.prototype.perform = function(type,domNode,options) {\n\toptions = options || {};\n\t// Find an animation that can handle this type\n\tvar chosenAnimation;\n\t$tw.utils.each(this.animations,function(animation,name) {\n\t\tif($tw.utils.hop(animation,type)) {\n\t\t\tchosenAnimation = animation[type];\n\t\t}\n\t});\n\tif(!chosenAnimation) {\n\t\tchosenAnimation = function(domNode,options) {\n\t\t\tif(options.callback) {\n\t\t\t\toptions.callback();\n\t\t\t}\n\t\t};\n\t}\n\t// Call the animation\n\tchosenAnimation(domNode,options);\n};\n\nexports.Animator = Animator;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/animator.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/browser.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/browser.js\ntype: application/javascript\nmodule-type: utils\n\nBrowser feature detection\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSet style properties of an element\n\telement: dom node\n\tstyles: ordered array of {name: value} pairs\n*/\nexports.setStyle = function(element,styles) {\n\tif(element.nodeType === 1) { // Element.ELEMENT_NODE\n\t\tfor(var t=0; t<styles.length; t++) {\n\t\t\tfor(var styleName in styles[t]) {\n\t\t\t\telement.style[$tw.utils.convertStyleNameToPropertyName(styleName)] = styles[t][styleName];\n\t\t\t}\n\t\t}\n\t}\n};\n\n/*\nConverts a standard CSS property name into the local browser-specific equivalent. For example:\n\t\"background-color\" --> \"backgroundColor\"\n\t\"transition\" --> \"webkitTransition\"\n*/\n\nvar styleNameCache = {}; // We'll cache the style name conversions\n\nexports.convertStyleNameToPropertyName = function(styleName) {\n\t// Return from the cache if we can\n\tif(styleNameCache[styleName]) {\n\t\treturn styleNameCache[styleName];\n\t}\n\t// Convert it by first removing any hyphens\n\tvar propertyName = $tw.utils.unHyphenateCss(styleName);\n\t// Then check if it needs a prefix\n\tif($tw.browser && document.body.style[propertyName] === undefined) {\n\t\tvar prefixes = [\"O\",\"MS\",\"Moz\",\"webkit\"];\n\t\tfor(var t=0; t<prefixes.length; t++) {\n\t\t\tvar prefixedName = prefixes[t] + propertyName.substr(0,1).toUpperCase() + propertyName.substr(1);\n\t\t\tif(document.body.style[prefixedName] !== undefined) {\n\t\t\t\tpropertyName = prefixedName;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\t// Put it in the cache too\n\tstyleNameCache[styleName] = propertyName;\n\treturn propertyName;\n};\n\n/*\nConverts a JS format CSS property name back into the dashed form used in CSS declarations. For example:\n\t\"backgroundColor\" --> \"background-color\"\n\t\"webkitTransform\" --> \"-webkit-transform\"\n*/\nexports.convertPropertyNameToStyleName = function(propertyName) {\n\t// Rehyphenate the name\n\tvar styleName = $tw.utils.hyphenateCss(propertyName);\n\t// If there's a webkit prefix, add a dash (other browsers have uppercase prefixes, and so get the dash automatically)\n\tif(styleName.indexOf(\"webkit\") === 0) {\n\t\tstyleName = \"-\" + styleName;\n\t} else if(styleName.indexOf(\"-m-s\") === 0) {\n\t\tstyleName = \"-ms\" + styleName.substr(4);\n\t}\n\treturn styleName;\n};\n\n/*\nRound trip a stylename to a property name and back again. For example:\n\t\"transform\" --> \"webkitTransform\" --> \"-webkit-transform\"\n*/\nexports.roundTripPropertyName = function(propertyName) {\n\treturn $tw.utils.convertPropertyNameToStyleName($tw.utils.convertStyleNameToPropertyName(propertyName));\n};\n\n/*\nConverts a standard event name into the local browser specific equivalent. 
For example:\n\t\"animationEnd\" --> \"webkitAnimationEnd\"\n*/\n\nvar eventNameCache = {}; // We'll cache the conversions\n\nvar eventNameMappings = {\n\t\"transitionEnd\": {\n\t\tcorrespondingCssProperty: \"transition\",\n\t\tmappings: {\n\t\t\ttransition: \"transitionend\",\n\t\t\tOTransition: \"oTransitionEnd\",\n\t\t\tMSTransition: \"msTransitionEnd\",\n\t\t\tMozTransition: \"transitionend\",\n\t\t\twebkitTransition: \"webkitTransitionEnd\"\n\t\t}\n\t},\n\t\"animationEnd\": {\n\t\tcorrespondingCssProperty: \"animation\",\n\t\tmappings: {\n\t\t\tanimation: \"animationend\",\n\t\t\tOAnimation: \"oAnimationEnd\",\n\t\t\tMSAnimation: \"msAnimationEnd\",\n\t\t\tMozAnimation: \"animationend\",\n\t\t\twebkitAnimation: \"webkitAnimationEnd\"\n\t\t}\n\t}\n};\n\nexports.convertEventName = function(eventName) {\n\tif(eventNameCache[eventName]) {\n\t\treturn eventNameCache[eventName];\n\t}\n\tvar newEventName = eventName,\n\t\tmappings = eventNameMappings[eventName];\n\tif(mappings) {\n\t\tvar convertedProperty = $tw.utils.convertStyleNameToPropertyName(mappings.correspondingCssProperty);\n\t\tif(mappings.mappings[convertedProperty]) {\n\t\t\tnewEventName = mappings.mappings[convertedProperty];\n\t\t}\n\t}\n\t// Put it in the cache too\n\teventNameCache[eventName] = newEventName;\n\treturn newEventName;\n};\n\n/*\nReturn the names of the fullscreen APIs\n*/\nexports.getFullScreenApis = function() {\n\tvar d = document,\n\t\tdb = d.body,\n\t\tresult = {\n\t\t\"_requestFullscreen\": db.webkitRequestFullscreen !== undefined ? \"webkitRequestFullscreen\" :\n\t\t\t\t\t\t\tdb.mozRequestFullScreen !== undefined ? \"mozRequestFullScreen\" :\n\t\t\t\t\t\t\tdb.msRequestFullscreen !== undefined ? \"msRequestFullscreen\" :\n\t\t\t\t\t\t\tdb.requestFullscreen !== undefined ? \"requestFullscreen\" : \"\",\n\t\t\"_exitFullscreen\": d.webkitExitFullscreen !== undefined ? \"webkitExitFullscreen\" :\n\t\t\t\t\t\t\td.mozCancelFullScreen !== undefined ? \"mozCancelFullScreen\" :\n\t\t\t\t\t\t\td.msExitFullscreen !== undefined ? \"msExitFullscreen\" :\n\t\t\t\t\t\t\td.exitFullscreen !== undefined ? \"exitFullscreen\" : \"\",\n\t\t\"_fullscreenElement\": d.webkitFullscreenElement !== undefined ? \"webkitFullscreenElement\" :\n\t\t\t\t\t\t\td.mozFullScreenElement !== undefined ? \"mozFullScreenElement\" :\n\t\t\t\t\t\t\td.msFullscreenElement !== undefined ? \"msFullscreenElement\" :\n\t\t\t\t\t\t\td.fullscreenElement !== undefined ? \"fullscreenElement\" : \"\",\n\t\t\"_fullscreenChange\": d.webkitFullscreenElement !== undefined ? \"webkitfullscreenchange\" :\n\t\t\t\t\t\t\td.mozFullScreenElement !== undefined ? \"mozfullscreenchange\" :\n\t\t\t\t\t\t\td.msFullscreenElement !== undefined ? \"MSFullscreenChange\" :\n\t\t\t\t\t\t\td.fullscreenElement !== undefined ? \"fullscreenchange\" : \"\"\n\t};\n\tif(!result._requestFullscreen || !result._exitFullscreen || !result._fullscreenElement || !result._fullscreenChange) {\n\t\treturn null;\n\t} else {\n\t\treturn result;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/browser.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/csscolorparser.js": {
            "text": "// (c) Dean McNamee <dean@gmail.com>, 2012.\n//\n// https://github.com/deanm/css-color-parser-js\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to\n// deal in the Software without restriction, including without limitation the\n// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n// sell copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n// IN THE SOFTWARE.\n\n// http://www.w3.org/TR/css3-color/\nvar kCSSColorTable = {\n  \"transparent\": [0,0,0,0], \"aliceblue\": [240,248,255,1],\n  \"antiquewhite\": [250,235,215,1], \"aqua\": [0,255,255,1],\n  \"aquamarine\": [127,255,212,1], \"azure\": [240,255,255,1],\n  \"beige\": [245,245,220,1], \"bisque\": [255,228,196,1],\n  \"black\": [0,0,0,1], \"blanchedalmond\": [255,235,205,1],\n  \"blue\": [0,0,255,1], \"blueviolet\": [138,43,226,1],\n  \"brown\": [165,42,42,1], \"burlywood\": [222,184,135,1],\n  \"cadetblue\": [95,158,160,1], \"chartreuse\": [127,255,0,1],\n  \"chocolate\": [210,105,30,1], \"coral\": [255,127,80,1],\n  \"cornflowerblue\": [100,149,237,1], \"cornsilk\": [255,248,220,1],\n  \"crimson\": [220,20,60,1], \"cyan\": [0,255,255,1],\n  \"darkblue\": [0,0,139,1], \"darkcyan\": [0,139,139,1],\n  \"darkgoldenrod\": [184,134,11,1], \"darkgray\": [169,169,169,1],\n  \"darkgreen\": [0,100,0,1], \"darkgrey\": [169,169,169,1],\n  \"darkkhaki\": [189,183,107,1], \"darkmagenta\": [139,0,139,1],\n  \"darkolivegreen\": [85,107,47,1], \"darkorange\": [255,140,0,1],\n  \"darkorchid\": [153,50,204,1], \"darkred\": [139,0,0,1],\n  \"darksalmon\": [233,150,122,1], \"darkseagreen\": [143,188,143,1],\n  \"darkslateblue\": [72,61,139,1], \"darkslategray\": [47,79,79,1],\n  \"darkslategrey\": [47,79,79,1], \"darkturquoise\": [0,206,209,1],\n  \"darkviolet\": [148,0,211,1], \"deeppink\": [255,20,147,1],\n  \"deepskyblue\": [0,191,255,1], \"dimgray\": [105,105,105,1],\n  \"dimgrey\": [105,105,105,1], \"dodgerblue\": [30,144,255,1],\n  \"firebrick\": [178,34,34,1], \"floralwhite\": [255,250,240,1],\n  \"forestgreen\": [34,139,34,1], \"fuchsia\": [255,0,255,1],\n  \"gainsboro\": [220,220,220,1], \"ghostwhite\": [248,248,255,1],\n  \"gold\": [255,215,0,1], \"goldenrod\": [218,165,32,1],\n  \"gray\": [128,128,128,1], \"green\": [0,128,0,1],\n  \"greenyellow\": [173,255,47,1], \"grey\": [128,128,128,1],\n  \"honeydew\": [240,255,240,1], \"hotpink\": [255,105,180,1],\n  \"indianred\": [205,92,92,1], \"indigo\": [75,0,130,1],\n  \"ivory\": [255,255,240,1], \"khaki\": [240,230,140,1],\n  \"lavender\": [230,230,250,1], \"lavenderblush\": [255,240,245,1],\n  \"lawngreen\": [124,252,0,1], \"lemonchiffon\": [255,250,205,1],\n  \"lightblue\": [173,216,230,1], \"lightcoral\": [240,128,128,1],\n  \"lightcyan\": [224,255,255,1], 
\"lightgoldenrodyellow\": [250,250,210,1],\n  \"lightgray\": [211,211,211,1], \"lightgreen\": [144,238,144,1],\n  \"lightgrey\": [211,211,211,1], \"lightpink\": [255,182,193,1],\n  \"lightsalmon\": [255,160,122,1], \"lightseagreen\": [32,178,170,1],\n  \"lightskyblue\": [135,206,250,1], \"lightslategray\": [119,136,153,1],\n  \"lightslategrey\": [119,136,153,1], \"lightsteelblue\": [176,196,222,1],\n  \"lightyellow\": [255,255,224,1], \"lime\": [0,255,0,1],\n  \"limegreen\": [50,205,50,1], \"linen\": [250,240,230,1],\n  \"magenta\": [255,0,255,1], \"maroon\": [128,0,0,1],\n  \"mediumaquamarine\": [102,205,170,1], \"mediumblue\": [0,0,205,1],\n  \"mediumorchid\": [186,85,211,1], \"mediumpurple\": [147,112,219,1],\n  \"mediumseagreen\": [60,179,113,1], \"mediumslateblue\": [123,104,238,1],\n  \"mediumspringgreen\": [0,250,154,1], \"mediumturquoise\": [72,209,204,1],\n  \"mediumvioletred\": [199,21,133,1], \"midnightblue\": [25,25,112,1],\n  \"mintcream\": [245,255,250,1], \"mistyrose\": [255,228,225,1],\n  \"moccasin\": [255,228,181,1], \"navajowhite\": [255,222,173,1],\n  \"navy\": [0,0,128,1], \"oldlace\": [253,245,230,1],\n  \"olive\": [128,128,0,1], \"olivedrab\": [107,142,35,1],\n  \"orange\": [255,165,0,1], \"orangered\": [255,69,0,1],\n  \"orchid\": [218,112,214,1], \"palegoldenrod\": [238,232,170,1],\n  \"palegreen\": [152,251,152,1], \"paleturquoise\": [175,238,238,1],\n  \"palevioletred\": [219,112,147,1], \"papayawhip\": [255,239,213,1],\n  \"peachpuff\": [255,218,185,1], \"peru\": [205,133,63,1],\n  \"pink\": [255,192,203,1], \"plum\": [221,160,221,1],\n  \"powderblue\": [176,224,230,1], \"purple\": [128,0,128,1],\n  \"red\": [255,0,0,1], \"rosybrown\": [188,143,143,1],\n  \"royalblue\": [65,105,225,1], \"saddlebrown\": [139,69,19,1],\n  \"salmon\": [250,128,114,1], \"sandybrown\": [244,164,96,1],\n  \"seagreen\": [46,139,87,1], \"seashell\": [255,245,238,1],\n  \"sienna\": [160,82,45,1], \"silver\": [192,192,192,1],\n  \"skyblue\": [135,206,235,1], \"slateblue\": [106,90,205,1],\n  \"slategray\": [112,128,144,1], \"slategrey\": [112,128,144,1],\n  \"snow\": [255,250,250,1], \"springgreen\": [0,255,127,1],\n  \"steelblue\": [70,130,180,1], \"tan\": [210,180,140,1],\n  \"teal\": [0,128,128,1], \"thistle\": [216,191,216,1],\n  \"tomato\": [255,99,71,1], \"turquoise\": [64,224,208,1],\n  \"violet\": [238,130,238,1], \"wheat\": [245,222,179,1],\n  \"white\": [255,255,255,1], \"whitesmoke\": [245,245,245,1],\n  \"yellow\": [255,255,0,1], \"yellowgreen\": [154,205,50,1]}\n\nfunction clamp_css_byte(i) {  // Clamp to integer 0 .. 255.\n  i = Math.round(i);  // Seems to be what Chrome does (vs truncation).\n  return i < 0 ? 0 : i > 255 ? 255 : i;\n}\n\nfunction clamp_css_float(f) {  // Clamp to float 0.0 .. 1.0.\n  return f < 0 ? 0 : f > 1 ? 
1 : f;\n}\n\nfunction parse_css_int(str) {  // int or percentage.\n  if (str[str.length - 1] === '%')\n    return clamp_css_byte(parseFloat(str) / 100 * 255);\n  return clamp_css_byte(parseInt(str));\n}\n\nfunction parse_css_float(str) {  // float or percentage.\n  if (str[str.length - 1] === '%')\n    return clamp_css_float(parseFloat(str) / 100);\n  return clamp_css_float(parseFloat(str));\n}\n\nfunction css_hue_to_rgb(m1, m2, h) {\n  if (h < 0) h += 1;\n  else if (h > 1) h -= 1;\n\n  if (h * 6 < 1) return m1 + (m2 - m1) * h * 6;\n  if (h * 2 < 1) return m2;\n  if (h * 3 < 2) return m1 + (m2 - m1) * (2/3 - h) * 6;\n  return m1;\n}\n\nfunction parseCSSColor(css_str) {\n  // Remove all whitespace, not compliant, but should just be more accepting.\n  var str = css_str.replace(/ /g, '').toLowerCase();\n\n  // Color keywords (and transparent) lookup.\n  if (str in kCSSColorTable) return kCSSColorTable[str].slice();  // dup.\n\n  // #abc and #abc123 syntax.\n  if (str[0] === '#') {\n    if (str.length === 4) {\n      var iv = parseInt(str.substr(1), 16);  // TODO(deanm): Stricter parsing.\n      if (!(iv >= 0 && iv <= 0xfff)) return null;  // Covers NaN.\n      return [((iv & 0xf00) >> 4) | ((iv & 0xf00) >> 8),\n              (iv & 0xf0) | ((iv & 0xf0) >> 4),\n              (iv & 0xf) | ((iv & 0xf) << 4),\n              1];\n    } else if (str.length === 7) {\n      var iv = parseInt(str.substr(1), 16);  // TODO(deanm): Stricter parsing.\n      if (!(iv >= 0 && iv <= 0xffffff)) return null;  // Covers NaN.\n      return [(iv & 0xff0000) >> 16,\n              (iv & 0xff00) >> 8,\n              iv & 0xff,\n              1];\n    }\n\n    return null;\n  }\n\n  var op = str.indexOf('('), ep = str.indexOf(')');\n  if (op !== -1 && ep + 1 === str.length) {\n    var fname = str.substr(0, op);\n    var params = str.substr(op+1, ep-(op+1)).split(',');\n    var alpha = 1;  // To allow case fallthrough.\n    switch (fname) {\n      case 'rgba':\n        if (params.length !== 4) return null;\n        alpha = parse_css_float(params.pop());\n        // Fall through.\n      case 'rgb':\n        if (params.length !== 3) return null;\n        return [parse_css_int(params[0]),\n                parse_css_int(params[1]),\n                parse_css_int(params[2]),\n                alpha];\n      case 'hsla':\n        if (params.length !== 4) return null;\n        alpha = parse_css_float(params.pop());\n        // Fall through.\n      case 'hsl':\n        if (params.length !== 3) return null;\n        var h = (((parseFloat(params[0]) % 360) + 360) % 360) / 360;  // 0 .. 1\n        // NOTE(deanm): According to the CSS spec s/l should only be\n        // percentages, but we don't bother and let float or percentage.\n        var s = parse_css_float(params[1]);\n        var l = parse_css_float(params[2]);\n        var m2 = l <= 0.5 ? l * (s + 1) : l + s - l * s;\n        var m1 = l * 2 - m2;\n        return [clamp_css_byte(css_hue_to_rgb(m1, m2, h+1/3) * 255),\n                clamp_css_byte(css_hue_to_rgb(m1, m2, h) * 255),\n                clamp_css_byte(css_hue_to_rgb(m1, m2, h-1/3) * 255),\n                alpha];\n      default:\n        return null;\n    }\n  }\n\n  return null;\n}\n\ntry { exports.parseCSSColor = parseCSSColor } catch(e) { }\n",
            "title": "$:/core/modules/utils/dom/csscolorparser.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom.js\ntype: application/javascript\nmodule-type: utils\n\nVarious static DOM-related utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nDetermines whether element 'a' contains element 'b'\nCode thanks to John Resig, http://ejohn.org/blog/comparing-document-position/\n*/\nexports.domContains = function(a,b) {\n\treturn a.contains ?\n\t\ta !== b && a.contains(b) :\n\t\t!!(a.compareDocumentPosition(b) & 16);\n};\n\nexports.removeChildren = function(node) {\n\twhile(node.hasChildNodes()) {\n\t\tnode.removeChild(node.firstChild);\n\t}\n};\n\nexports.hasClass = function(el,className) {\n\treturn el && el.className && el.className.toString().split(\" \").indexOf(className) !== -1;\n};\n\nexports.addClass = function(el,className) {\n\tvar c = el.className.split(\" \");\n\tif(c.indexOf(className) === -1) {\n\t\tc.push(className);\n\t}\n\tel.className = c.join(\" \");\n};\n\nexports.removeClass = function(el,className) {\n\tvar c = el.className.split(\" \"),\n\t\tp = c.indexOf(className);\n\tif(p !== -1) {\n\t\tc.splice(p,1);\n\t\tel.className = c.join(\" \");\n\t}\n};\n\nexports.toggleClass = function(el,className,status) {\n\tif(status === undefined) {\n\t\tstatus = !exports.hasClass(el,className);\n\t}\n\tif(status) {\n\t\texports.addClass(el,className);\n\t} else {\n\t\texports.removeClass(el,className);\n\t}\n};\n\n/*\nGet the first parent element that has scrollbars or use the body as fallback.\n*/\nexports.getScrollContainer = function(el) {\n\tvar doc = el.ownerDocument;\n\twhile(el.parentNode) {\t\n\t\tel = el.parentNode;\n\t\tif(el.scrollTop) {\n\t\t\treturn el;\n\t\t}\n\t}\n\treturn doc.body;\n};\n\n/*\nGet the scroll position of the viewport\nReturns:\n\t{\n\t\tx: horizontal scroll position in pixels,\n\t\ty: vertical scroll position in pixels\n\t}\n*/\nexports.getScrollPosition = function() {\n\tif(\"scrollX\" in window) {\n\t\treturn {x: window.scrollX, y: window.scrollY};\n\t} else {\n\t\treturn {x: document.documentElement.scrollLeft, y: document.documentElement.scrollTop};\n\t}\n};\n\n/*\nAdjust the height of a textarea to fit its content, preserving scroll position, and return the height\n*/\nexports.resizeTextAreaToFit = function(domNode,minHeight) {\n\t// Get the scroll container and register the current scroll position\n\tvar container = $tw.utils.getScrollContainer(domNode),\n\t\tscrollTop = container.scrollTop;\n    // Measure the specified minimum height\n\tdomNode.style.height = minHeight;\n\tvar measuredHeight = domNode.offsetHeight;\n\t// Set its height to auto so that it snaps to the correct height\n\tdomNode.style.height = \"auto\";\n\t// Calculate the revised height\n\tvar newHeight = Math.max(domNode.scrollHeight + domNode.offsetHeight - domNode.clientHeight,measuredHeight);\n\t// Only try to change the height if it has changed\n\tif(newHeight !== domNode.offsetHeight) {\n\t\tdomNode.style.height = newHeight + \"px\";\n\t\t// Make sure that the dimensions of the textarea are recalculated\n\t\t$tw.utils.forceLayout(domNode);\n\t\t// Set the container to the position we registered at the beginning\n\t\tcontainer.scrollTop = scrollTop;\n\t}\n\treturn newHeight;\n};\n\n/*\nGets the bounding rectangle of an element in absolute page coordinates\n*/\nexports.getBoundingPageRect = function(element) {\n\tvar scrollPos = $tw.utils.getScrollPosition(),\n\t\tclientRect = element.getBoundingClientRect();\n\treturn {\n\t\tleft: clientRect.left + 
scrollPos.x,\n\t\twidth: clientRect.width,\n\t\tright: clientRect.right + scrollPos.x,\n\t\ttop: clientRect.top + scrollPos.y,\n\t\theight: clientRect.height,\n\t\tbottom: clientRect.bottom + scrollPos.y\n\t};\n};\n\n/*\nSaves a named password in the browser\n*/\nexports.savePassword = function(name,password) {\n\ttry {\n\t\tif(window.localStorage) {\n\t\t\tlocalStorage.setItem(\"tw5-password-\" + name,password);\n\t\t}\n\t} catch(e) {\n\t}\n};\n\n/*\nRetrieve a named password from the browser\n*/\nexports.getPassword = function(name) {\n\ttry {\n\t\treturn window.localStorage ? localStorage.getItem(\"tw5-password-\" + name) : \"\";\n\t} catch(e) {\n\t\treturn \"\";\n\t}\n};\n\n/*\nForce layout of a dom node and its descendents\n*/\nexports.forceLayout = function(element) {\n\tvar dummy = element.offsetWidth;\n};\n\n/*\nPulse an element for debugging purposes\n*/\nexports.pulseElement = function(element) {\n\t// Event handler to remove the class at the end\n\telement.addEventListener($tw.browser.animationEnd,function handler(event) {\n\t\telement.removeEventListener($tw.browser.animationEnd,handler,false);\n\t\t$tw.utils.removeClass(element,\"pulse\");\n\t},false);\n\t// Apply the pulse class\n\t$tw.utils.removeClass(element,\"pulse\");\n\t$tw.utils.forceLayout(element);\n\t$tw.utils.addClass(element,\"pulse\");\n};\n\n/*\nAttach specified event handlers to a DOM node\ndomNode: where to attach the event handlers\nevents: array of event handlers to be added (see below)\nEach entry in the events array is an object with these properties:\nhandlerFunction: optional event handler function\nhandlerObject: optional event handler object\nhandlerMethod: optionally specifies object handler method name (defaults to `handleEvent`)\n*/\nexports.addEventListeners = function(domNode,events) {\n\t$tw.utils.each(events,function(eventInfo) {\n\t\tvar handler;\n\t\tif(eventInfo.handlerFunction) {\n\t\t\thandler = eventInfo.handlerFunction;\n\t\t} else if(eventInfo.handlerObject) {\n\t\t\tif(eventInfo.handlerMethod) {\n\t\t\t\thandler = function(event) {\n\t\t\t\t\teventInfo.handlerObject[eventInfo.handlerMethod].call(eventInfo.handlerObject,event);\n\t\t\t\t};\t\n\t\t\t} else {\n\t\t\t\thandler = eventInfo.handlerObject;\n\t\t\t}\n\t\t}\n\t\tdomNode.addEventListener(eventInfo.name,handler,false);\n\t});\n};\n\n/*\nGet the computed styles applied to an element as an array of strings of individual CSS properties\n*/\nexports.getComputedStyles = function(domNode) {\n\tvar textAreaStyles = window.getComputedStyle(domNode,null),\n\t\tstyleDefs = [],\n\t\tname;\n\tfor(var t=0; t<textAreaStyles.length; t++) {\n\t\tname = textAreaStyles[t];\n\t\tstyleDefs.push(name + \": \" + textAreaStyles.getPropertyValue(name) + \";\");\n\t}\n\treturn styleDefs;\n};\n\n/*\nApply a set of styles passed as an array of strings of individual CSS properties\n*/\nexports.setStyles = function(domNode,styleDefs) {\n\tdomNode.style.cssText = styleDefs.join(\"\");\n};\n\n/*\nCopy the computed styles from a source element to a destination element\n*/\nexports.copyStyles = function(srcDomNode,dstDomNode) {\n\t$tw.utils.setStyles(dstDomNode,$tw.utils.getComputedStyles(srcDomNode));\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/http.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/http.js\ntype: application/javascript\nmodule-type: utils\n\nBrowser HTTP support\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nA quick and dirty HTTP function; to be refactored later. Options are:\n\turl: URL to retrieve\n\ttype: GET, PUT, POST etc\n\tcallback: function invoked with (err,data)\n*/\nexports.httpRequest = function(options) {\n\tvar type = options.type || \"GET\",\n\t\theaders = options.headers || {accept: \"application/json\"},\n\t\trequest = new XMLHttpRequest(),\n\t\tdata = \"\",\n\t\tf,results;\n\t// Massage the data hashmap into a string\n\tif(options.data) {\n\t\tif(typeof options.data === \"string\") { // Already a string\n\t\t\tdata = options.data;\n\t\t} else { // A hashmap of strings\n\t\t\tresults = [];\n\t\t\t$tw.utils.each(options.data,function(dataItem,dataItemTitle) {\n\t\t\t\tresults.push(dataItemTitle + \"=\" + encodeURIComponent(dataItem));\n\t\t\t});\n\t\t\tdata = results.join(\"&\");\n\t\t}\n\t}\n\t// Set up the state change handler\n\trequest.onreadystatechange = function() {\n\t\tif(this.readyState === 4) {\n\t\t\tif(this.status === 200 || this.status === 201 || this.status === 204) {\n\t\t\t\t// Success!\n\t\t\t\toptions.callback(null,this.responseText,this);\n\t\t\t\treturn;\n\t\t\t}\n\t\t// Something went wrong\n\t\toptions.callback($tw.language.getString(\"Error/XMLHttpRequest\") + \": \" + this.status);\n\t\t}\n\t};\n\t// Make the request\n\trequest.open(type,options.url,true);\n\tif(headers) {\n\t\t$tw.utils.each(headers,function(header,headerTitle,object) {\n\t\t\trequest.setRequestHeader(headerTitle,header);\n\t\t});\n\t}\n\tif(data && !$tw.utils.hop(headers,\"Content-type\")) {\n\t\trequest.setRequestHeader(\"Content-type\",\"application/x-www-form-urlencoded; charset=UTF-8\");\n\t}\n\ttry {\n\t\trequest.send(data);\n\t} catch(e) {\n\t\toptions.callback(e);\n\t}\n\treturn request;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/http.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/keyboard.js\ntype: application/javascript\nmodule-type: utils\n\nKeyboard utilities; now deprecated. Instead, use $tw.keyboardManager\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n[\"parseKeyDescriptor\",\"checkKeyDescriptor\"].forEach(function(method) {\n\texports[method] = function() {\n\t\tif($tw.keyboardManager) {\n\t\t\treturn $tw.keyboardManager[method].apply($tw.keyboardManager,Array.prototype.slice.call(arguments,0));\n\t\t} else {\n\t\t\treturn null\n\t\t}\n\t};\n});\n\n})();\n",
            "title": "$:/core/modules/utils/dom/keyboard.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/modal.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/modal.js\ntype: application/javascript\nmodule-type: utils\n\nModal message mechanism\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar Modal = function(wiki) {\n\tthis.wiki = wiki;\n\tthis.modalCount = 0;\n};\n\n/*\nDisplay a modal dialogue\n\ttitle: Title of tiddler to display\n\toptions: see below\nOptions include:\n\tdownloadLink: Text of a big download link to include\n*/\nModal.prototype.display = function(title,options) {\n\toptions = options || {};\n\tvar self = this,\n\t\trefreshHandler,\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\ttiddler = this.wiki.getTiddler(title);\n\t// Don't do anything if the tiddler doesn't exist\n\tif(!tiddler) {\n\t\treturn;\n\t}\n\t// Create the variables\n\tvar variables = $tw.utils.extend({currentTiddler: title},options.variables);\n\t// Create the wrapper divs\n\tvar wrapper = document.createElement(\"div\"),\n\t\tmodalBackdrop = document.createElement(\"div\"),\n\t\tmodalWrapper = document.createElement(\"div\"),\n\t\tmodalHeader = document.createElement(\"div\"),\n\t\theaderTitle = document.createElement(\"h3\"),\n\t\tmodalBody = document.createElement(\"div\"),\n\t\tmodalLink = document.createElement(\"a\"),\n\t\tmodalFooter = document.createElement(\"div\"),\n\t\tmodalFooterHelp = document.createElement(\"span\"),\n\t\tmodalFooterButtons = document.createElement(\"span\");\n\t// Up the modal count and adjust the body class\n\tthis.modalCount++;\n\tthis.adjustPageClass();\n\t// Add classes\n\t$tw.utils.addClass(wrapper,\"tc-modal-wrapper\");\n\t$tw.utils.addClass(modalBackdrop,\"tc-modal-backdrop\");\n\t$tw.utils.addClass(modalWrapper,\"tc-modal\");\n\t$tw.utils.addClass(modalHeader,\"tc-modal-header\");\n\t$tw.utils.addClass(modalBody,\"tc-modal-body\");\n\t$tw.utils.addClass(modalFooter,\"tc-modal-footer\");\n\t// Join them together\n\twrapper.appendChild(modalBackdrop);\n\twrapper.appendChild(modalWrapper);\n\tmodalHeader.appendChild(headerTitle);\n\tmodalWrapper.appendChild(modalHeader);\n\tmodalWrapper.appendChild(modalBody);\n\tmodalFooter.appendChild(modalFooterHelp);\n\tmodalFooter.appendChild(modalFooterButtons);\n\tmodalWrapper.appendChild(modalFooter);\n\t// Render the title of the message\n\tvar headerWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tfield: \"subtitle\",\n\t\tmode: \"inline\",\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\tattributes: {\n\t\t\t\ttext: {\n\t\t\t\t\ttype: \"string\",\n\t\t\t\t\tvalue: title\n\t\t}}}],\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\theaderWidgetNode.render(headerTitle,null);\n\t// Render the body of the message\n\tvar bodyWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\tbodyWidgetNode.render(modalBody,null);\n\t// Setup the link if present\n\tif(options.downloadLink) {\n\t\tmodalLink.href = options.downloadLink;\n\t\tmodalLink.appendChild(document.createTextNode(\"Right-click to save changes\"));\n\t\tmodalBody.appendChild(modalLink);\n\t}\n\t// Render the footer of the message\n\tif(tiddler && tiddler.fields && tiddler.fields.help) {\n\t\tvar link = document.createElement(\"a\");\n\t\tlink.setAttribute(\"href\",tiddler.fields.help);\n\t\tlink.setAttribute(\"target\",\"_blank\");\n\t\tlink.setAttribute(\"rel\",\"noopener 
noreferrer\");\n\t\tlink.appendChild(document.createTextNode(\"Help\"));\n\t\tmodalFooterHelp.appendChild(link);\n\t\tmodalFooterHelp.style.float = \"left\";\n\t}\n\tvar footerWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tfield: \"footer\",\n\t\tmode: \"inline\",\n\t\tchildren: [{\n\t\t\ttype: \"button\",\n\t\t\tattributes: {\n\t\t\t\tmessage: {\n\t\t\t\t\ttype: \"string\",\n\t\t\t\t\tvalue: \"tm-close-tiddler\"\n\t\t\t\t}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\tattributes: {\n\t\t\t\t\ttext: {\n\t\t\t\t\t\ttype: \"string\",\n\t\t\t\t\t\tvalue: $tw.language.getString(\"Buttons/Close/Caption\")\n\t\t\t}}}\n\t\t]}],\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\tfooterWidgetNode.render(modalFooterButtons,null);\n\t// Set up the refresh handler\n\trefreshHandler = function(changes) {\n\t\theaderWidgetNode.refresh(changes,modalHeader,null);\n\t\tbodyWidgetNode.refresh(changes,modalBody,null);\n\t\tfooterWidgetNode.refresh(changes,modalFooterButtons,null);\n\t};\n\tthis.wiki.addEventListener(\"change\",refreshHandler);\n\t// Add the close event handler\n\tvar closeHandler = function(event) {\n\t\t// Remove our refresh handler\n\t\tself.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t// Decrease the modal count and adjust the body class\n\t\tself.modalCount--;\n\t\tself.adjustPageClass();\n\t\t// Force layout and animate the modal message away\n\t\t$tw.utils.forceLayout(modalBackdrop);\n\t\t$tw.utils.forceLayout(modalWrapper);\n\t\t$tw.utils.setStyle(modalBackdrop,[\n\t\t\t{opacity: \"0\"}\n\t\t]);\n\t\t$tw.utils.setStyle(modalWrapper,[\n\t\t\t{transform: \"translateY(\" + window.innerHeight + \"px)\"}\n\t\t]);\n\t\t// Set up an event for the transition end\n\t\twindow.setTimeout(function() {\n\t\t\tif(wrapper.parentNode) {\n\t\t\t\t// Remove the modal message from the DOM\n\t\t\t\tdocument.body.removeChild(wrapper);\n\t\t\t}\n\t\t},duration);\n\t\t// Don't let anyone else handle the tm-close-tiddler message\n\t\treturn false;\n\t};\n\theaderWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\tbodyWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\tfooterWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\t// Set the initial styles for the message\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{opacity: \"0\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transformOrigin: \"0% 0%\"},\n\t\t{transform: \"translateY(\" + (-window.innerHeight) + \"px)\"}\n\t]);\n\t// Put the message into the document\n\tdocument.body.appendChild(wrapper);\n\t// Set up animation for the styles\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{transition: \"opacity \" + duration + \"ms ease-out\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out\"}\n\t]);\n\t// Force layout\n\t$tw.utils.forceLayout(modalBackdrop);\n\t$tw.utils.forceLayout(modalWrapper);\n\t// Set final animated styles\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{opacity: \"0.7\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transform: \"translateY(0px)\"}\n\t]);\n};\n\nModal.prototype.adjustPageClass = function() {\n\tif($tw.pageContainer) {\n\t\t$tw.utils.toggleClass($tw.pageContainer,\"tc-modal-displayed\",this.modalCount > 0);\n\t}\n};\n\nexports.Modal = Modal;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/modal.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/notifier.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/notifier.js\ntype: application/javascript\nmodule-type: utils\n\nNotifier mechanism\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar Notifier = function(wiki) {\n\tthis.wiki = wiki;\n};\n\n/*\nDisplay a notification\n\ttitle: Title of tiddler containing the notification text\n\toptions: see below\nOptions include:\n*/\nNotifier.prototype.display = function(title,options) {\n\toptions = options || {};\n\t// Create the wrapper divs\n\tvar self = this,\n\t\tnotification = document.createElement(\"div\"),\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\trefreshHandler;\n\t// Don't do anything if the tiddler doesn't exist\n\tif(!tiddler) {\n\t\treturn;\n\t}\n\t// Add classes\n\t$tw.utils.addClass(notification,\"tc-notification\");\n\t// Create the variables\n\tvar variables = $tw.utils.extend({currentTiddler: title},options.variables);\n\t// Render the body of the notification\n\tvar widgetNode = this.wiki.makeTranscludeWidget(title,{parentWidget: $tw.rootWidget, document: document, variables: variables});\n\twidgetNode.render(notification,null);\n\trefreshHandler = function(changes) {\n\t\twidgetNode.refresh(changes,notification,null);\n\t};\n\tthis.wiki.addEventListener(\"change\",refreshHandler);\n\t// Set the initial styles for the notification\n\t$tw.utils.setStyle(notification,[\n\t\t{opacity: \"0\"},\n\t\t{transformOrigin: \"0% 0%\"},\n\t\t{transform: \"translateY(\" + (-window.innerHeight) + \"px)\"},\n\t\t{transition: \"opacity \" + duration + \"ms ease-out, \" + $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out\"}\n\t]);\n\t// Add the notification to the DOM\n\tdocument.body.appendChild(notification);\n\t// Force layout\n\t$tw.utils.forceLayout(notification);\n\t// Set final animated styles\n\t$tw.utils.setStyle(notification,[\n\t\t{opacity: \"1.0\"},\n\t\t{transform: \"translateY(0px)\"}\n\t]);\n\t// Set a timer to remove the notification\n\twindow.setTimeout(function() {\n\t\t// Remove our change event handler\n\t\tself.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t// Force layout and animate the notification away\n\t\t$tw.utils.forceLayout(notification);\n\t\t$tw.utils.setStyle(notification,[\n\t\t\t{opacity: \"0.0\"},\n\t\t\t{transform: \"translateX(\" + (notification.offsetWidth) + \"px)\"}\n\t\t]);\n\t\t// Remove the modal message from the DOM once the transition ends\n\t\tsetTimeout(function() {\n\t\t\tif(notification.parentNode) {\n\t\t\t\tdocument.body.removeChild(notification);\n\t\t\t}\n\t\t},duration);\n\t},$tw.config.preferences.notificationDuration);\n};\n\nexports.Notifier = Notifier;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/notifier.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/popup.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/popup.js\ntype: application/javascript\nmodule-type: utils\n\nModule that creates a $tw.utils.Popup object prototype that manages popups in the browser\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreates a Popup object with these options:\n\trootElement: the DOM element to which the popup zapper should be attached\n*/\nvar Popup = function(options) {\n\toptions = options || {};\n\tthis.rootElement = options.rootElement || document.documentElement;\n\tthis.popups = []; // Array of {title:,wiki:,domNode:} objects\n};\n\n/*\nTrigger a popup open or closed. Parameters are in a hashmap:\n\ttitle: title of the tiddler where the popup details are stored\n\tdomNode: dom node to which the popup will be positioned\n\twiki: wiki\n\tforce: if specified, forces the popup state to true or false (instead of toggling it)\n*/\nPopup.prototype.triggerPopup = function(options) {\n\t// Check if this popup is already active\n\tvar index = this.findPopup(options.title);\n\t// Compute the new state\n\tvar state = index === -1;\n\tif(options.force !== undefined) {\n\t\tstate = options.force;\n\t}\n\t// Show or cancel the popup according to the new state\n\tif(state) {\n\t\tthis.show(options);\n\t} else {\n\t\tthis.cancel(index);\n\t}\n};\n\nPopup.prototype.findPopup = function(title) {\n\tvar index = -1;\n\tfor(var t=0; t<this.popups.length; t++) {\n\t\tif(this.popups[t].title === title) {\n\t\t\tindex = t;\n\t\t}\n\t}\n\treturn index;\n};\n\nPopup.prototype.handleEvent = function(event) {\n\tif(event.type === \"click\") {\n\t\t// Find out what was clicked on\n\t\tvar info = this.popupInfo(event.target),\n\t\t\tcancelLevel = info.popupLevel - 1;\n\t\t// Don't remove the level that was clicked on if we clicked on a handle\n\t\tif(info.isHandle) {\n\t\t\tcancelLevel++;\n\t\t}\n\t\t// Cancel\n\t\tthis.cancel(cancelLevel);\n\t}\n};\n\n/*\nFind the popup level containing a DOM node. 
Returns:\npopupLevel: count of the number of nested popups containing the specified element\nisHandle: true if the specified element is within a popup handle\n*/\nPopup.prototype.popupInfo = function(domNode) {\n\tvar isHandle = false,\n\t\tpopupCount = 0,\n\t\tnode = domNode;\n\t// First check ancestors to see if we're within a popup handle\n\twhile(node) {\n\t\tif($tw.utils.hasClass(node,\"tc-popup-handle\")) {\n\t\t\tisHandle = true;\n\t\t\tpopupCount++;\n\t\t}\n\t\tif($tw.utils.hasClass(node,\"tc-popup-keep\")) {\n\t\t\tisHandle = true;\n\t\t}\n\t\tnode = node.parentNode;\n\t}\n\t// Then count the number of ancestor popups\n\tnode = domNode;\n\twhile(node) {\n\t\tif($tw.utils.hasClass(node,\"tc-popup\")) {\n\t\t\tpopupCount++;\n\t\t}\n\t\tnode = node.parentNode;\n\t}\n\tvar info = {\n\t\tpopupLevel: popupCount,\n\t\tisHandle: isHandle\n\t};\n\treturn info;\n};\n\n/*\nDisplay a popup by adding it to the stack\n*/\nPopup.prototype.show = function(options) {\n\t// Find out what was clicked on\n\tvar info = this.popupInfo(options.domNode);\n\t// Cancel any higher level popups\n\tthis.cancel(info.popupLevel);\n\t// Store the popup details if not already there\n\tif(this.findPopup(options.title) === -1) {\n\t\tthis.popups.push({\n\t\t\ttitle: options.title,\n\t\t\twiki: options.wiki,\n\t\t\tdomNode: options.domNode\n\t\t});\n\t}\n\t// Set the state tiddler\n\toptions.wiki.setTextReference(options.title,\n\t\t\t\"(\" + options.domNode.offsetLeft + \",\" + options.domNode.offsetTop + \",\" + \n\t\t\t\toptions.domNode.offsetWidth + \",\" + options.domNode.offsetHeight + \")\");\n\t// Add the click handler if we have any popups\n\tif(this.popups.length > 0) {\n\t\tthis.rootElement.addEventListener(\"click\",this,true);\t\t\n\t}\n};\n\n/*\nCancel all popups at or above a specified level or DOM node\nlevel: popup level to cancel (0 cancels all popups)\n*/\nPopup.prototype.cancel = function(level) {\n\tvar numPopups = this.popups.length;\n\tlevel = Math.max(0,Math.min(level,numPopups));\n\tfor(var t=level; t<numPopups; t++) {\n\t\tvar popup = this.popups.pop();\n\t\tif(popup.title) {\n\t\t\tpopup.wiki.deleteTiddler(popup.title);\n\t\t}\n\t}\n\tif(this.popups.length === 0) {\n\t\tthis.rootElement.removeEventListener(\"click\",this,false);\n\t}\n};\n\n/*\nReturns true if the specified title and text identifies an active popup\n*/\nPopup.prototype.readPopupState = function(text) {\n\tvar popupLocationRegExp = /^\\((-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+)\\)$/;\n\treturn popupLocationRegExp.test(text);\n};\n\nexports.Popup = Popup;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/popup.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/scroller.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/scroller.js\ntype: application/javascript\nmodule-type: utils\n\nModule that creates a $tw.utils.Scroller object prototype that manages scrolling in the browser\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nEvent handler for when the `tm-scroll` event hits the document body\n*/\nvar PageScroller = function() {\n\tthis.idRequestFrame = null;\n\tthis.requestAnimationFrame = window.requestAnimationFrame ||\n\t\twindow.webkitRequestAnimationFrame ||\n\t\twindow.mozRequestAnimationFrame ||\n\t\tfunction(callback) {\n\t\t\treturn window.setTimeout(callback, 1000/60);\n\t\t};\n\tthis.cancelAnimationFrame = window.cancelAnimationFrame ||\n\t\twindow.webkitCancelAnimationFrame ||\n\t\twindow.webkitCancelRequestAnimationFrame ||\n\t\twindow.mozCancelAnimationFrame ||\n\t\twindow.mozCancelRequestAnimationFrame ||\n\t\tfunction(id) {\n\t\t\twindow.clearTimeout(id);\n\t\t};\n};\n\nPageScroller.prototype.cancelScroll = function() {\n\tif(this.idRequestFrame) {\n\t\tthis.cancelAnimationFrame.call(window,this.idRequestFrame);\n\t\tthis.idRequestFrame = null;\n\t}\n};\n\n/*\nHandle an event\n*/\nPageScroller.prototype.handleEvent = function(event) {\n\tif(event.type === \"tm-scroll\") {\n\t\treturn this.scrollIntoView(event.target);\n\t}\n\treturn true;\n};\n\n/*\nHandle a scroll event hitting the page document\n*/\nPageScroller.prototype.scrollIntoView = function(element) {\n\tvar duration = $tw.utils.getAnimationDuration();\n\t// Now get ready to scroll the body\n\tthis.cancelScroll();\n\tthis.startTime = Date.now();\n\tvar scrollPosition = $tw.utils.getScrollPosition();\n\t// Get the client bounds of the element and adjust by the scroll position\n\tvar clientBounds = element.getBoundingClientRect(),\n\t\tbounds = {\n\t\t\tleft: clientBounds.left + scrollPosition.x,\n\t\t\ttop: clientBounds.top + scrollPosition.y,\n\t\t\twidth: clientBounds.width,\n\t\t\theight: clientBounds.height\n\t\t};\n\t// We'll consider the horizontal and vertical scroll directions separately via this function\n\t// targetPos/targetSize - position and size of the target element\n\t// currentPos/currentSize - position and size of the current scroll viewport\n\t// returns: new position of the scroll viewport\n\tvar getEndPos = function(targetPos,targetSize,currentPos,currentSize) {\n\t\t\tvar newPos = currentPos;\n\t\t\t// If the target is above/left of the current view, then scroll to it's top/left\n\t\t\tif(targetPos <= currentPos) {\n\t\t\t\tnewPos = targetPos;\n\t\t\t// If the target is smaller than the window and the scroll position is too far up, then scroll till the target is at the bottom of the window\n\t\t\t} else if(targetSize < currentSize && currentPos < (targetPos + targetSize - currentSize)) {\n\t\t\t\tnewPos = targetPos + targetSize - currentSize;\n\t\t\t// If the target is big, then just scroll to the top\n\t\t\t} else if(currentPos < targetPos) {\n\t\t\t\tnewPos = targetPos;\n\t\t\t// Otherwise, stay where we are\n\t\t\t} else {\n\t\t\t\tnewPos = currentPos;\n\t\t\t}\n\t\t\t// If we are scrolling within 50 pixels of the top/left then snap to zero\n\t\t\tif(newPos < 50) {\n\t\t\t\tnewPos = 0;\n\t\t\t}\n\t\t\treturn newPos;\n\t\t},\n\t\tendX = getEndPos(bounds.left,bounds.width,scrollPosition.x,window.innerWidth),\n\t\tendY = getEndPos(bounds.top,bounds.height,scrollPosition.y,window.innerHeight);\n\t// Only scroll if the position has changed\n\tif(endX !== scrollPosition.x || endY !== 
scrollPosition.y) {\n\t\tvar self = this,\n\t\t\tdrawFrame;\n\t\tdrawFrame = function () {\n\t\t\tvar t;\n\t\t\tif(duration <= 0) {\n\t\t\t\tt = 1;\n\t\t\t} else {\n\t\t\t\tt = ((Date.now()) - self.startTime) / duration;\t\n\t\t\t}\n\t\t\tif(t >= 1) {\n\t\t\t\tself.cancelScroll();\n\t\t\t\tt = 1;\n\t\t\t}\n\t\t\tt = $tw.utils.slowInSlowOut(t);\n\t\t\twindow.scrollTo(scrollPosition.x + (endX - scrollPosition.x) * t,scrollPosition.y + (endY - scrollPosition.y) * t);\n\t\t\tif(t < 1) {\n\t\t\t\tself.idRequestFrame = self.requestAnimationFrame.call(window,drawFrame);\n\t\t\t}\n\t\t};\n\t\tdrawFrame();\n\t}\n};\n\nexports.PageScroller = PageScroller;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/scroller.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/edition-info.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/edition-info.js\ntype: application/javascript\nmodule-type: utils-node\n\nInformation about the available editions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar fs = require(\"fs\"),\n\tpath = require(\"path\");\n\nvar editionInfo;\n\nexports.getEditionInfo = function() {\n\tif(!editionInfo) {\n\t\t// Enumerate the edition paths\n\t\tvar editionPaths = $tw.getLibraryItemSearchPaths($tw.config.editionsPath,$tw.config.editionsEnvVar);\n\t\teditionInfo = {};\n\t\tfor(var editionIndex=0; editionIndex<editionPaths.length; editionIndex++) {\n\t\t\tvar editionPath = editionPaths[editionIndex];\n\t\t\t// Enumerate the folders\n\t\t\tvar entries = fs.readdirSync(editionPath);\n\t\t\tfor(var entryIndex=0; entryIndex<entries.length; entryIndex++) {\n\t\t\t\tvar entry = entries[entryIndex];\n\t\t\t\t// Check if directories have a valid tiddlywiki.info\n\t\t\t\tif(!editionInfo[entry] && $tw.utils.isDirectory(path.resolve(editionPath,entry))) {\n\t\t\t\t\tvar info;\n\t\t\t\t\ttry {\n\t\t\t\t\t\tinfo = JSON.parse(fs.readFileSync(path.resolve(editionPath,entry,\"tiddlywiki.info\"),\"utf8\"));\n\t\t\t\t\t} catch(ex) {\n\t\t\t\t\t}\n\t\t\t\t\tif(info) {\n\t\t\t\t\t\teditionInfo[entry] = info;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn editionInfo;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/edition-info.js",
            "type": "application/javascript",
            "module-type": "utils-node"
        },
        "$:/core/modules/utils/fakedom.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/fakedom.js\ntype: application/javascript\nmodule-type: global\n\nA barebones implementation of DOM interfaces needed by the rendering mechanism.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Sequence number used to enable us to track objects for testing\nvar sequenceNumber = null;\n\nvar bumpSequenceNumber = function(object) {\n\tif(sequenceNumber !== null) {\n\t\tobject.sequenceNumber = sequenceNumber++;\n\t}\n};\n\nvar TW_TextNode = function(text) {\n\tbumpSequenceNumber(this);\n\tthis.textContent = text;\n};\n\nObject.defineProperty(TW_TextNode.prototype, \"nodeType\", {\n\tget: function() {\n\t\treturn 3;\n\t}\n});\n\nObject.defineProperty(TW_TextNode.prototype, \"formattedTextContent\", {\n\tget: function() {\n\t\treturn this.textContent.replace(/(\\r?\\n)/g,\"\");\n\t}\n});\n\nvar TW_Element = function(tag,namespace) {\n\tbumpSequenceNumber(this);\n\tthis.isTiddlyWikiFakeDom = true;\n\tthis.tag = tag;\n\tthis.attributes = {};\n\tthis.isRaw = false;\n\tthis.children = [];\n\tthis.style = {};\n\tthis.namespaceURI = namespace || \"http://www.w3.org/1999/xhtml\";\n};\n\nObject.defineProperty(TW_Element.prototype, \"nodeType\", {\n\tget: function() {\n\t\treturn 1;\n\t}\n});\n\nTW_Element.prototype.getAttribute = function(name) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot getAttribute on a raw TW_Element\";\n\t}\n\treturn this.attributes[name];\n};\n\nTW_Element.prototype.setAttribute = function(name,value) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot setAttribute on a raw TW_Element\";\n\t}\n\tthis.attributes[name] = value;\n};\n\nTW_Element.prototype.setAttributeNS = function(namespace,name,value) {\n\tthis.setAttribute(name,value);\n};\n\nTW_Element.prototype.removeAttribute = function(name) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot removeAttribute on a raw TW_Element\";\n\t}\n\tif($tw.utils.hop(this.attributes,name)) {\n\t\tdelete this.attributes[name];\n\t}\n};\n\nTW_Element.prototype.appendChild = function(node) {\n\tthis.children.push(node);\n\tnode.parentNode = this;\n};\n\nTW_Element.prototype.insertBefore = function(node,nextSibling) {\n\tif(nextSibling) {\n\t\tvar p = this.children.indexOf(nextSibling);\n\t\tif(p !== -1) {\n\t\t\tthis.children.splice(p,0,node);\n\t\t\tnode.parentNode = this;\n\t\t} else {\n\t\t\tthis.appendChild(node);\n\t\t}\n\t} else {\n\t\tthis.appendChild(node);\n\t}\n};\n\nTW_Element.prototype.removeChild = function(node) {\n\tvar p = this.children.indexOf(node);\n\tif(p !== -1) {\n\t\tthis.children.splice(p,1);\n\t}\n};\n\nTW_Element.prototype.hasChildNodes = function() {\n\treturn !!this.children.length;\n};\n\nObject.defineProperty(TW_Element.prototype, \"childNodes\", {\n\tget: function() {\n\t\treturn this.children;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"firstChild\", {\n\tget: function() {\n\t\treturn this.children[0];\n\t}\n});\n\nTW_Element.prototype.addEventListener = function(type,listener,useCapture) {\n\t// Do nothing\n};\n\nObject.defineProperty(TW_Element.prototype, \"tagName\", {\n\tget: function() {\n\t\treturn this.tag || \"\";\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"className\", {\n\tget: function() {\n\t\treturn this.attributes[\"class\"] || \"\";\n\t},\n\tset: function(value) {\n\t\tthis.attributes[\"class\"] = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"value\", {\n\tget: function() {\n\t\treturn this.attributes.value || \"\";\n\t},\n\tset: function(value) 
{\n\t\tthis.attributes.value = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"outerHTML\", {\n\tget: function() {\n\t\tvar output = [],attr,a,v;\n\t\toutput.push(\"<\",this.tag);\n\t\tif(this.attributes) {\n\t\t\tattr = [];\n\t\t\tfor(a in this.attributes) {\n\t\t\t\tattr.push(a);\n\t\t\t}\n\t\t\tattr.sort();\n\t\t\tfor(a=0; a<attr.length; a++) {\n\t\t\t\tv = this.attributes[attr[a]];\n\t\t\t\tif(v !== undefined) {\n\t\t\t\t\toutput.push(\" \",attr[a],\"=\\\"\",$tw.utils.htmlEncode(v),\"\\\"\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(this.style) {\n\t\t\tvar style = [];\n\t\t\tfor(var s in this.style) {\n\t\t\t\tstyle.push(s + \":\" + this.style[s] + \";\");\n\t\t\t}\n\t\t\tif(style.length > 0) {\n\t\t\t\toutput.push(\" style=\\\"\",style.join(\"\"),\"\\\"\")\n\t\t\t}\n\t\t}\n\t\toutput.push(\">\");\n\t\tif($tw.config.htmlVoidElements.indexOf(this.tag) === -1) {\n\t\t\toutput.push(this.innerHTML);\n\t\t\toutput.push(\"</\",this.tag,\">\");\n\t\t}\n\t\treturn output.join(\"\");\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"innerHTML\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\treturn this.rawHTML;\n\t\t} else {\n\t\t\tvar b = [];\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tif(node instanceof TW_Element) {\n\t\t\t\t\tb.push(node.outerHTML);\n\t\t\t\t} else if(node instanceof TW_TextNode) {\n\t\t\t\t\tb.push($tw.utils.htmlEncode(node.textContent));\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn b.join(\"\");\n\t\t}\n\t},\n\tset: function(value) {\n\t\tthis.isRaw = true;\n\t\tthis.rawHTML = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"textContent\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\tthrow \"Cannot get textContent on a raw TW_Element\";\n\t\t} else {\n\t\t\tvar b = [];\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tb.push(node.textContent);\n\t\t\t});\n\t\t\treturn b.join(\"\");\n\t\t}\n\t},\n\tset: function(value) {\n\t\tthis.children = [new TW_TextNode(value)];\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"formattedTextContent\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\tthrow \"Cannot get formattedTextContent on a raw TW_Element\";\n\t\t} else {\n\t\t\tvar b = [],\n\t\t\t\tisBlock = $tw.config.htmlBlockElements.indexOf(this.tag) !== -1;\n\t\t\tif(isBlock) {\n\t\t\t\tb.push(\"\\n\");\n\t\t\t}\n\t\t\tif(this.tag === \"li\") {\n\t\t\t\tb.push(\"* \");\n\t\t\t}\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tb.push(node.formattedTextContent);\n\t\t\t});\n\t\t\tif(isBlock) {\n\t\t\t\tb.push(\"\\n\");\n\t\t\t}\n\t\t\treturn b.join(\"\");\n\t\t}\n\t}\n});\n\nvar document = {\n\tsetSequenceNumber: function(value) {\n\t\tsequenceNumber = value;\n\t},\n\tcreateElementNS: function(namespace,tag) {\n\t\treturn new TW_Element(tag,namespace);\n\t},\n\tcreateElement: function(tag) {\n\t\treturn new TW_Element(tag);\n\t},\n\tcreateTextNode: function(text) {\n\t\treturn new TW_TextNode(text);\n\t},\n\tcompatMode: \"CSS1Compat\", // For KaTeX to know that we're not a browser in quirks mode\n\tisTiddlyWikiFakeDom: true\n};\n\nexports.fakeDocument = document;\n\n})();\n",
            "title": "$:/core/modules/utils/fakedom.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/utils/filesystem.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/filesystem.js\ntype: application/javascript\nmodule-type: utils-node\n\nFile system utilities\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar fs = require(\"fs\"),\n\tpath = require(\"path\");\n\n/*\nRecursively (and synchronously) copy a directory and all its content\n*/\nexports.copyDirectory = function(srcPath,dstPath) {\n\t// Remove any trailing path separators\n\tsrcPath = $tw.utils.removeTrailingSeparator(srcPath);\n\tdstPath = $tw.utils.removeTrailingSeparator(dstPath);\n\t// Create the destination directory\n\tvar err = $tw.utils.createDirectory(dstPath);\n\tif(err) {\n\t\treturn err;\n\t}\n\t// Function to copy a folder full of files\n\tvar copy = function(srcPath,dstPath) {\n\t\tvar srcStats = fs.lstatSync(srcPath),\n\t\t\tdstExists = fs.existsSync(dstPath);\n\t\tif(srcStats.isFile()) {\n\t\t\t$tw.utils.copyFile(srcPath,dstPath);\n\t\t} else if(srcStats.isDirectory()) {\n\t\t\tvar items = fs.readdirSync(srcPath);\n\t\t\tfor(var t=0; t<items.length; t++) {\n\t\t\t\tvar item = items[t],\n\t\t\t\t\terr = copy(srcPath + path.sep + item,dstPath + path.sep + item);\n\t\t\t\tif(err) {\n\t\t\t\t\treturn err;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\tcopy(srcPath,dstPath);\n\treturn null;\n};\n\n/*\nCopy a file\n*/\nvar FILE_BUFFER_LENGTH = 64 * 1024,\n\tfileBuffer;\n\nexports.copyFile = function(srcPath,dstPath) {\n\t// Create buffer if required\n\tif(!fileBuffer) {\n\t\tfileBuffer = new Buffer(FILE_BUFFER_LENGTH);\n\t}\n\t// Create any directories in the destination\n\t$tw.utils.createDirectory(path.dirname(dstPath));\n\t// Copy the file\n\tvar srcFile = fs.openSync(srcPath,\"r\"),\n\t\tdstFile = fs.openSync(dstPath,\"w\"),\n\t\tbytesRead = 1,\n\t\tpos = 0;\n\twhile (bytesRead > 0) {\n\t\tbytesRead = fs.readSync(srcFile,fileBuffer,0,FILE_BUFFER_LENGTH,pos);\n\t\tfs.writeSync(dstFile,fileBuffer,0,bytesRead);\n\t\tpos += bytesRead;\n\t}\n\tfs.closeSync(srcFile);\n\tfs.closeSync(dstFile);\n\treturn null;\n};\n\n/*\nRemove trailing path separator\n*/\nexports.removeTrailingSeparator = function(dirPath) {\n\tvar len = dirPath.length;\n\tif(dirPath.charAt(len-1) === path.sep) {\n\t\tdirPath = dirPath.substr(0,len-1);\n\t}\n\treturn dirPath;\n};\n\n/*\nRecursively create a directory\n*/\nexports.createDirectory = function(dirPath) {\n\tif(dirPath.substr(dirPath.length-1,1) !== path.sep) {\n\t\tdirPath = dirPath + path.sep;\n\t}\n\tvar pos = 1;\n\tpos = dirPath.indexOf(path.sep,pos);\n\twhile(pos !== -1) {\n\t\tvar subDirPath = dirPath.substr(0,pos);\n\t\tif(!$tw.utils.isDirectory(subDirPath)) {\n\t\t\ttry {\n\t\t\t\tfs.mkdirSync(subDirPath);\n\t\t\t} catch(e) {\n\t\t\t\treturn \"Error creating directory '\" + subDirPath + \"'\";\n\t\t\t}\n\t\t}\n\t\tpos = dirPath.indexOf(path.sep,pos + 1);\n\t}\n\treturn null;\n};\n\n/*\nRecursively create directories needed to contain a specified file\n*/\nexports.createFileDirectories = function(filePath) {\n\treturn $tw.utils.createDirectory(path.dirname(filePath));\n};\n\n/*\nRecursively delete a directory\n*/\nexports.deleteDirectory = function(dirPath) {\n\tif(fs.existsSync(dirPath)) {\n\t\tvar entries = fs.readdirSync(dirPath);\n\t\tfor(var entryIndex=0; entryIndex<entries.length; entryIndex++) {\n\t\t\tvar currPath = dirPath + path.sep + entries[entryIndex];\n\t\t\tif(fs.lstatSync(currPath).isDirectory()) {\n\t\t\t\t$tw.utils.deleteDirectory(currPath);\n\t\t\t} else 
{\n\t\t\t\tfs.unlinkSync(currPath);\n\t\t\t}\n\t\t}\n\tfs.rmdirSync(dirPath);\n\t}\n\treturn null;\n};\n\n/*\nCheck if a path identifies a directory\n*/\nexports.isDirectory = function(dirPath) {\n\treturn fs.existsSync(dirPath) && fs.statSync(dirPath).isDirectory();\n};\n\n/*\nCheck if a path identifies a directory that is empty\n*/\nexports.isDirectoryEmpty = function(dirPath) {\n\tif(!$tw.utils.isDirectory(dirPath)) {\n\t\treturn false;\n\t}\n\tvar files = fs.readdirSync(dirPath),\n\t\tempty = true;\n\t$tw.utils.each(files,function(file,index) {\n\t\tif(file.charAt(0) !== \".\") {\n\t\t\tempty = false;\n\t\t}\n\t});\n\treturn empty;\n};\n\n/*\nRecursively delete a tree of empty directories\n*/\nexports.deleteEmptyDirs = function(dirpath,callback) {\n\tvar self = this;\n\tfs.readdir(dirpath,function(err,files) {\n\t\tif(err) {\n\t\t\treturn callback(err);\n\t\t}\n\t\tif(files.length > 0) {\n\t\t\treturn callback(null);\n\t\t}\n\t\tfs.rmdir(dirpath,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tself.deleteEmptyDirs(path.dirname(dirpath),callback);\n\t\t});\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/utils/filesystem.js",
            "type": "application/javascript",
            "module-type": "utils-node"
        },
        "$:/core/modules/utils/logger.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/logger.js\ntype: application/javascript\nmodule-type: utils\n\nA basic logging implementation\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar ALERT_TAG = \"$:/tags/Alert\";\n\n/*\nMake a new logger\n*/\nfunction Logger(componentName) {\n\tthis.componentName = componentName || \"\";\n}\n\n/*\nLog a message\n*/\nLogger.prototype.log = function(/* args */) {\n\tif(console !== undefined && console.log !== undefined) {\n\t\treturn Function.apply.call(console.log, console, [this.componentName + \":\"].concat(Array.prototype.slice.call(arguments,0)));\n\t}\n};\n\n/*\nAlert a message\n*/\nLogger.prototype.alert = function(/* args */) {\n\t// Prepare the text of the alert\n\tvar text = Array.prototype.join.call(arguments,\" \");\n\t// Create alert tiddlers in the browser\n\tif($tw.browser) {\n\t\t// Check if there is an existing alert with the same text and the same component\n\t\tvar existingAlerts = $tw.wiki.getTiddlersWithTag(ALERT_TAG),\n\t\t\talertFields,\n\t\t\texistingCount,\n\t\t\tself = this;\n\t\t$tw.utils.each(existingAlerts,function(title) {\n\t\t\tvar tiddler = $tw.wiki.getTiddler(title);\n\t\t\tif(tiddler.fields.text === text && tiddler.fields.component === self.componentName && tiddler.fields.modified && (!alertFields || tiddler.fields.modified < alertFields.modified)) {\n\t\t\t\t\talertFields = $tw.utils.extend({},tiddler.fields);\n\t\t\t}\n\t\t});\n\t\tif(alertFields) {\n\t\t\texistingCount = alertFields.count || 1;\n\t\t} else {\n\t\t\talertFields = {\n\t\t\t\ttitle: $tw.wiki.generateNewTitle(\"$:/temp/alerts/alert\",{prefix: \"\"}),\n\t\t\t\ttext: text,\n\t\t\t\ttags: [ALERT_TAG],\n\t\t\t\tcomponent: this.componentName\n\t\t\t};\n\t\t\texistingCount = 0;\n\t\t}\n\t\talertFields.modified = new Date();\n\t\tif(++existingCount > 1) {\n\t\t\talertFields.count = existingCount;\n\t\t} else {\n\t\t\talertFields.count = undefined;\n\t\t}\n\t\t$tw.wiki.addTiddler(new $tw.Tiddler(alertFields));\n\t\t// Log the alert as well\n\t\tthis.log.apply(this,Array.prototype.slice.call(arguments,0));\n\t} else {\n\t\t// Print an orange message to the console if not in the browser\n\t\tconsole.error(\"\\x1b[1;33m\" + text + \"\\x1b[0m\");\n\t}\n};\n\nexports.Logger = Logger;\n\n})();\n",
            "title": "$:/core/modules/utils/logger.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/parsetree.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/parsetree.js\ntype: application/javascript\nmodule-type: utils\n\nParse tree utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.addAttributeToParseTreeNode = function(node,name,value) {\n\tnode.attributes = node.attributes || {};\n\tnode.attributes[name] = {type: \"string\", value: value};\n};\n\nexports.getAttributeValueFromParseTreeNode = function(node,name,defaultValue) {\n\tif(node.attributes && node.attributes[name] && node.attributes[name].value !== undefined) {\n\t\treturn node.attributes[name].value;\n\t}\n\treturn defaultValue;\n};\n\nexports.addClassToParseTreeNode = function(node,classString) {\n\tvar classes = [];\n\tnode.attributes = node.attributes || {};\n\tnode.attributes[\"class\"] = node.attributes[\"class\"] || {type: \"string\", value: \"\"};\n\tif(node.attributes[\"class\"].type === \"string\") {\n\t\tif(node.attributes[\"class\"].value !== \"\") {\n\t\t\tclasses = node.attributes[\"class\"].value.split(\" \");\n\t\t}\n\t\tif(classString !== \"\") {\n\t\t\t$tw.utils.pushTop(classes,classString.split(\" \"));\n\t\t}\n\t\tnode.attributes[\"class\"].value = classes.join(\" \");\n\t}\n};\n\nexports.addStyleToParseTreeNode = function(node,name,value) {\n\t\tnode.attributes = node.attributes || {};\n\t\tnode.attributes.style = node.attributes.style || {type: \"string\", value: \"\"};\n\t\tif(node.attributes.style.type === \"string\") {\n\t\t\tnode.attributes.style.value += name + \":\" + value + \";\";\n\t\t}\n};\n\nexports.findParseTreeNode = function(nodeArray,search) {\n\tfor(var t=0; t<nodeArray.length; t++) {\n\t\tif(nodeArray[t].type === search.type && nodeArray[t].tag === search.tag) {\n\t\t\treturn nodeArray[t];\n\t\t}\n\t}\n\treturn undefined;\n};\n\n/*\nHelper to get the text of a parse tree node or array of nodes\n*/\nexports.getParseTreeText = function getParseTreeText(tree) {\n\tvar output = [];\n\tif($tw.utils.isArray(tree)) {\n\t\t$tw.utils.each(tree,function(node) {\n\t\t\toutput.push(getParseTreeText(node));\n\t\t});\n\t} else {\n\t\tif(tree.type === \"text\") {\n\t\t\toutput.push(tree.text);\n\t\t}\n\t\tif(tree.children) {\n\t\t\treturn getParseTreeText(tree.children);\n\t\t}\n\t}\n\treturn output.join(\"\");\n};\n\n})();\n",
            "title": "$:/core/modules/utils/parsetree.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/performance.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/performance.js\ntype: application/javascript\nmodule-type: global\n\nPerformance measurement.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction Performance(enabled) {\n\tthis.enabled = !!enabled;\n\tthis.measures = {}; // Hashmap of current values of measurements\n\tthis.logger = new $tw.utils.Logger(\"performance\");\n}\n\n/*\nWrap performance reporting around a top level function\n*/\nPerformance.prototype.report = function(name,fn) {\n\tvar self = this;\n\tif(this.enabled) {\n\t\treturn function() {\n\t\t\tself.measures = {};\n\t\t\tvar startTime = $tw.utils.timer(),\n\t\t\t\tresult = fn.apply(this,arguments);\n\t\t\tself.logger.log(name + \": \" + $tw.utils.timer(startTime).toFixed(2) + \"ms\");\n\t\t\tfor(var m in self.measures) {\n\t\t\t\tself.logger.log(\"+\" + m + \": \" + self.measures[m].toFixed(2) + \"ms\");\n\t\t\t}\n\t\t\treturn result;\n\t\t};\n\t} else {\n\t\treturn fn;\n\t}\n};\n\n/*\nWrap performance measurements around a subfunction\n*/\nPerformance.prototype.measure = function(name,fn) {\n\tvar self = this;\n\tif(this.enabled) {\n\t\treturn function() {\n\t\t\tvar startTime = $tw.utils.timer(),\n\t\t\t\tresult = fn.apply(this,arguments),\n\t\t\t\tvalue = self.measures[name] || 0;\n\t\t\tself.measures[name] = value + $tw.utils.timer(startTime);\n\t\t\treturn result;\n\t\t};\n\t} else {\n\t\treturn fn;\n\t}\n};\n\nexports.Performance = Performance;\n\n})();\n",
            "title": "$:/core/modules/utils/performance.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/utils/pluginmaker.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/pluginmaker.js\ntype: application/javascript\nmodule-type: utils\n\nA quick and dirty way to pack up plugins within the browser.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nRepack a plugin, and then delete any non-shadow payload tiddlers\n*/\nexports.repackPlugin = function(title,additionalTiddlers,excludeTiddlers) {\n\tadditionalTiddlers = additionalTiddlers || [];\n\texcludeTiddlers = excludeTiddlers || [];\n\t// Get the plugin tiddler\n\tvar pluginTiddler = $tw.wiki.getTiddler(title);\n\tif(!pluginTiddler) {\n\t\tthrow \"No such tiddler as \" + title;\n\t}\n\t// Extract the JSON\n\tvar jsonPluginTiddler;\n\ttry {\n\t\tjsonPluginTiddler = JSON.parse(pluginTiddler.fields.text);\n\t} catch(e) {\n\t\tthrow \"Cannot parse plugin tiddler \" + title + \"\\n\" + $tw.language.getString(\"Error/Caption\") + \": \" + e;\n\t}\n\t// Get the list of tiddlers\n\tvar tiddlers = Object.keys(jsonPluginTiddler.tiddlers);\n\t// Add the additional tiddlers\n\t$tw.utils.pushTop(tiddlers,additionalTiddlers);\n\t// Remove any excluded tiddlers\n\tfor(var t=tiddlers.length-1; t>=0; t--) {\n\t\tif(excludeTiddlers.indexOf(tiddlers[t]) !== -1) {\n\t\t\ttiddlers.splice(t,1);\n\t\t}\n\t}\n\t// Pack up the tiddlers into a block of JSON\n\tvar plugins = {};\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = $tw.wiki.getTiddler(title),\n\t\t\tfields = {};\n\t\t$tw.utils.each(tiddler.fields,function (value,name) {\n\t\t\tfields[name] = tiddler.getFieldString(name);\n\t\t});\n\t\tplugins[title] = fields;\n\t});\n\t// Retrieve and bump the version number\n\tvar pluginVersion = $tw.utils.parseVersion(pluginTiddler.getFieldString(\"version\") || \"0.0.0\") || {\n\t\t\tmajor: \"0\",\n\t\t\tminor: \"0\",\n\t\t\tpatch: \"0\"\n\t\t};\n\tpluginVersion.patch++;\n\tvar version = pluginVersion.major + \".\" + pluginVersion.minor + \".\" + pluginVersion.patch;\n\tif(pluginVersion.prerelease) {\n\t\tversion += \"-\" + pluginVersion.prerelease;\n\t}\n\tif(pluginVersion.build) {\n\t\tversion += \"+\" + pluginVersion.build;\n\t}\n\t// Save the tiddler\n\t$tw.wiki.addTiddler(new $tw.Tiddler(pluginTiddler,{text: JSON.stringify({tiddlers: plugins},null,4), version: version}));\n\t// Delete any non-shadow constituent tiddlers\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tif($tw.wiki.tiddlerExists(title)) {\n\t\t\t$tw.wiki.deleteTiddler(title);\n\t\t}\n\t});\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\t// Return a heartwarming confirmation\n\treturn \"Plugin \" + title + \" successfully saved\";\n};\n\n})();\n",
            "title": "$:/core/modules/utils/pluginmaker.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/utils.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/utils.js\ntype: application/javascript\nmodule-type: utils\n\nVarious static utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nDisplay a warning, in colour if we're on a terminal\n*/\nexports.warning = function(text) {\n\tconsole.log($tw.node ? \"\\x1b[1;33m\" + text + \"\\x1b[0m\" : text);\n};\n\n/*\nRepeats a string\n*/\nexports.repeat = function(str,count) {\n\tvar result = \"\";\n\tfor(var t=0;t<count;t++) {\n\t\tresult += str;\n\t}\n\treturn result;\n};\n\n/*\nTrim whitespace from the start and end of a string\nThanks to Steven Levithan, http://blog.stevenlevithan.com/archives/faster-trim-javascript\n*/\nexports.trim = function(str) {\n\tif(typeof str === \"string\") {\n\t\treturn str.replace(/^\\s\\s*/, '').replace(/\\s\\s*$/, '');\n\t} else {\n\t\treturn str;\n\t}\n};\n\n/*\nFind the line break preceding a given position in a string\nReturns position immediately after that line break, or the start of the string\n*/\nexports.findPrecedingLineBreak = function(text,pos) {\n\tvar result = text.lastIndexOf(\"\\n\",pos - 1);\n\tif(result === -1) {\n\t\tresult = 0;\n\t} else {\n\t\tresult++;\n\t\tif(text.charAt(result) === \"\\r\") {\n\t\t\tresult++;\n\t\t}\n\t}\n\treturn result;\n};\n\n/*\nFind the line break following a given position in a string\n*/\nexports.findFollowingLineBreak = function(text,pos) {\n\t// Cut to just past the following line break, or to the end of the text\n\tvar result = text.indexOf(\"\\n\",pos);\n\tif(result === -1) {\n\t\tresult = text.length;\n\t} else {\n\t\tif(text.charAt(result) === \"\\r\") {\n\t\t\tresult++;\n\t\t}\n\t}\n\treturn result;\n};\n\n/*\nReturn the number of keys in an object\n*/\nexports.count = function(object) {\n\treturn Object.keys(object || {}).length;\n};\n\n/*\nCheck if an array is equal by value and by reference.\n*/\nexports.isArrayEqual = function(array1,array2) {\n\tif(array1 === array2) {\n\t\treturn true;\n\t}\n\tarray1 = array1 || [];\n\tarray2 = array2 || [];\n\tif(array1.length !== array2.length) {\n\t\treturn false;\n\t}\n\treturn array1.every(function(value,index) {\n\t\treturn value === array2[index];\n\t});\n};\n\n/*\nPush entries onto an array, removing them first if they already exist in the array\n\tarray: array to modify (assumed to be free of duplicates)\n\tvalue: a single value to push or an array of values to push\n*/\nexports.pushTop = function(array,value) {\n\tvar t,p;\n\tif($tw.utils.isArray(value)) {\n\t\t// Remove any array entries that are duplicated in the new values\n\t\tif(value.length !== 0) {\n\t\t\tif(array.length !== 0) {\n\t\t\t\tif(value.length < array.length) {\n\t\t\t\t\tfor(t=0; t<value.length; t++) {\n\t\t\t\t\t\tp = array.indexOf(value[t]);\n\t\t\t\t\t\tif(p !== -1) {\n\t\t\t\t\t\t\tarray.splice(p,1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfor(t=array.length-1; t>=0; t--) {\n\t\t\t\t\t\tp = value.indexOf(array[t]);\n\t\t\t\t\t\tif(p !== -1) {\n\t\t\t\t\t\t\tarray.splice(t,1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Push the values on top of the main array\n\t\t\tarray.push.apply(array,value);\n\t\t}\n\t} else {\n\t\tp = array.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tarray.splice(p,1);\n\t\t}\n\t\tarray.push(value);\n\t}\n\treturn array;\n};\n\n/*\nRemove entries from an array\n\tarray: array to modify\n\tvalue: a single value to remove, or an array of values to remove\n*/\nexports.removeArrayEntries = function(array,value) 
{\n\tvar t,p;\n\tif($tw.utils.isArray(value)) {\n\t\tfor(t=0; t<value.length; t++) {\n\t\t\tp = array.indexOf(value[t]);\n\t\t\tif(p !== -1) {\n\t\t\t\tarray.splice(p,1);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tp = array.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tarray.splice(p,1);\n\t\t}\n\t}\n};\n\n/*\nCheck whether any members of a hashmap are present in another hashmap\n*/\nexports.checkDependencies = function(dependencies,changes) {\n\tvar hit = false;\n\t$tw.utils.each(changes,function(change,title) {\n\t\tif($tw.utils.hop(dependencies,title)) {\n\t\t\thit = true;\n\t\t}\n\t});\n\treturn hit;\n};\n\nexports.extend = function(object /* [, src] */) {\n\t$tw.utils.each(Array.prototype.slice.call(arguments, 1), function(source) {\n\t\tif(source) {\n\t\t\tfor(var property in source) {\n\t\t\t\tobject[property] = source[property];\n\t\t\t}\n\t\t}\n\t});\n\treturn object;\n};\n\nexports.deepCopy = function(object) {\n\tvar result,t;\n\tif($tw.utils.isArray(object)) {\n\t\t// Copy arrays\n\t\tresult = object.slice(0);\n\t} else if(typeof object === \"object\") {\n\t\tresult = {};\n\t\tfor(t in object) {\n\t\t\tif(object[t] !== undefined) {\n\t\t\t\tresult[t] = $tw.utils.deepCopy(object[t]);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tresult = object;\n\t}\n\treturn result;\n};\n\nexports.extendDeepCopy = function(object,extendedProperties) {\n\tvar result = $tw.utils.deepCopy(object),t;\n\tfor(t in extendedProperties) {\n\t\tif(extendedProperties[t] !== undefined) {\n\t\t\tresult[t] = $tw.utils.deepCopy(extendedProperties[t]);\n\t\t}\n\t}\n\treturn result;\n};\n\nexports.deepFreeze = function deepFreeze(object) {\n\tvar property, key;\n\tObject.freeze(object);\n\tfor(key in object) {\n\t\tproperty = object[key];\n\t\tif($tw.utils.hop(object,key) && (typeof property === \"object\") && !Object.isFrozen(property)) {\n\t\t\tdeepFreeze(property);\n\t\t}\n\t}\n};\n\nexports.slowInSlowOut = function(t) {\n\treturn (1 - ((Math.cos(t * Math.PI) + 1) / 2));\n};\n\nexports.formatDateString = function(date,template) {\n\tvar result = \"\",\n\t\tt = template,\n\t\tmatches = [\n\t\t\t[/^0hh12/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getHours12(date));\n\t\t\t}],\n\t\t\t[/^wYYYY/, function() {\n\t\t\t\treturn $tw.utils.getYearForWeekNo(date);\n\t\t\t}],\n\t\t\t[/^hh12/, function() {\n\t\t\t\treturn $tw.utils.getHours12(date);\n\t\t\t}],\n\t\t\t[/^DDth/, function() {\n\t\t\t\treturn date.getDate() + $tw.utils.getDaySuffix(date);\n\t\t\t}],\n\t\t\t[/^YYYY/, function() {\n\t\t\t\treturn date.getFullYear();\n\t\t\t}],\n\t\t\t[/^0hh/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getHours());\n\t\t\t}],\n\t\t\t[/^0mm/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getMinutes());\n\t\t\t}],\n\t\t\t[/^0ss/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getSeconds());\n\t\t\t}],\n\t\t\t[/^0DD/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getDate());\n\t\t\t}],\n\t\t\t[/^0MM/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getMonth()+1);\n\t\t\t}],\n\t\t\t[/^0WW/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getWeek(date));\n\t\t\t}],\n\t\t\t[/^ddd/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Short/Day/\" + date.getDay());\n\t\t\t}],\n\t\t\t[/^mmm/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Short/Month/\" + (date.getMonth() + 1));\n\t\t\t}],\n\t\t\t[/^DDD/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Long/Day/\" + date.getDay());\n\t\t\t}],\n\t\t\t[/^MMM/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Long/Month/\" + (date.getMonth() + 
1));\n\t\t\t}],\n\t\t\t[/^TZD/, function() {\n\t\t\t\tvar tz = date.getTimezoneOffset(),\n\t\t\t\tatz = Math.abs(tz);\n\t\t\t\treturn (tz < 0 ? '+' : '-') + $tw.utils.pad(Math.floor(atz / 60)) + ':' + $tw.utils.pad(atz % 60);\n\t\t\t}],\n\t\t\t[/^wYY/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getYearForWeekNo(date) - 2000);\n\t\t\t}],\n\t\t\t[/^[ap]m/, function() {\n\t\t\t\treturn $tw.utils.getAmPm(date).toLowerCase();\n\t\t\t}],\n\t\t\t[/^hh/, function() {\n\t\t\t\treturn date.getHours();\n\t\t\t}],\n\t\t\t[/^mm/, function() {\n\t\t\t\treturn date.getMinutes();\n\t\t\t}],\n\t\t\t[/^ss/, function() {\n\t\t\t\treturn date.getSeconds();\n\t\t\t}],\n\t\t\t[/^[AP]M/, function() {\n\t\t\t\treturn $tw.utils.getAmPm(date).toUpperCase();\n\t\t\t}],\n\t\t\t[/^DD/, function() {\n\t\t\t\treturn date.getDate();\n\t\t\t}],\n\t\t\t[/^MM/, function() {\n\t\t\t\treturn date.getMonth() + 1;\n\t\t\t}],\n\t\t\t[/^WW/, function() {\n\t\t\t\treturn $tw.utils.getWeek(date);\n\t\t\t}],\n\t\t\t[/^YY/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getFullYear() - 2000);\n\t\t\t}]\n\t\t];\n\twhile(t.length){\n\t\tvar matchString = \"\";\n\t\t$tw.utils.each(matches, function(m) {\n\t\t\tvar match = m[0].exec(t);\n\t\t\tif(match) {\n\t\t\t\tmatchString = m[1].call();\n\t\t\t\tt = t.substr(match[0].length);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t});\n\t\tif(matchString) {\n\t\t\tresult += matchString;\n\t\t} else {\n\t\t\tresult += t.charAt(0);\n\t\t\tt = t.substr(1);\n\t\t}\n\t}\n\tresult = result.replace(/\\\\(.)/g,\"$1\");\n\treturn result;\n};\n\nexports.getAmPm = function(date) {\n\treturn $tw.language.getString(\"Date/Period/\" + (date.getHours() >= 12 ? \"pm\" : \"am\"));\n};\n\nexports.getDaySuffix = function(date) {\n\treturn $tw.language.getString(\"Date/DaySuffix/\" + date.getDate());\n};\n\nexports.getWeek = function(date) {\n\tvar dt = new Date(date.getTime());\n\tvar d = dt.getDay();\n\tif(d === 0) {\n\t\td = 7; // JavaScript Sun=0, ISO Sun=7\n\t}\n\tdt.setTime(dt.getTime() + (4 - d) * 86400000);// shift day to Thurs of same week to calculate weekNo\n\tvar n = Math.floor((dt.getTime()-new Date(dt.getFullYear(),0,1) + 3600000) / 86400000);\n\treturn Math.floor(n / 7) + 1;\n};\n\nexports.getYearForWeekNo = function(date) {\n\tvar dt = new Date(date.getTime());\n\tvar d = dt.getDay();\n\tif(d === 0) {\n\t\td = 7; // JavaScript Sun=0, ISO Sun=7\n\t}\n\tdt.setTime(dt.getTime() + (4 - d) * 86400000);// shift day to Thurs of same week\n\treturn dt.getFullYear();\n};\n\nexports.getHours12 = function(date) {\n\tvar h = date.getHours();\n\treturn h > 12 ? h-12 : ( h > 0 ? 
h : 12 );\n};\n\n/*\nConvert a date delta in milliseconds into a string representation of \"23 seconds ago\", \"27 minutes ago\" etc.\n\tdelta: delta in milliseconds\nReturns an object with these members:\n\tdescription: string describing the delta period\n\tupdatePeriod: time in millisecond until the string will be inaccurate\n*/\nexports.getRelativeDate = function(delta) {\n\tvar futurep = false;\n\tif(delta < 0) {\n\t\tdelta = -1 * delta;\n\t\tfuturep = true;\n\t}\n\tvar units = [\n\t\t{name: \"Years\",   duration:      365 * 24 * 60 * 60 * 1000},\n\t\t{name: \"Months\",  duration: (365/12) * 24 * 60 * 60 * 1000},\n\t\t{name: \"Days\",    duration:            24 * 60 * 60 * 1000},\n\t\t{name: \"Hours\",   duration:                 60 * 60 * 1000},\n\t\t{name: \"Minutes\", duration:                      60 * 1000},\n\t\t{name: \"Seconds\", duration:                           1000}\n\t];\n\tfor(var t=0; t<units.length; t++) {\n\t\tvar result = Math.floor(delta / units[t].duration);\n\t\tif(result >= 2) {\n\t\t\treturn {\n\t\t\t\tdelta: delta,\n\t\t\t\tdescription: $tw.language.getString(\n\t\t\t\t\t\"RelativeDate/\" + (futurep ? \"Future\" : \"Past\") + \"/\" + units[t].name,\n\t\t\t\t\t{variables:\n\t\t\t\t\t\t{period: result.toString()}\n\t\t\t\t\t}\n\t\t\t\t),\n\t\t\t\tupdatePeriod: units[t].duration\n\t\t\t};\n\t\t}\n\t}\n\treturn {\n\t\tdelta: delta,\n\t\tdescription: $tw.language.getString(\n\t\t\t\"RelativeDate/\" + (futurep ? \"Future\" : \"Past\") + \"/Second\",\n\t\t\t{variables:\n\t\t\t\t{period: \"1\"}\n\t\t\t}\n\t\t),\n\t\tupdatePeriod: 1000\n\t};\n};\n\n// Convert & to \"&amp;\", < to \"&lt;\", > to \"&gt;\", \" to \"&quot;\"\nexports.htmlEncode = function(s) {\n\tif(s) {\n\t\treturn s.toString().replace(/&/mg,\"&amp;\").replace(/</mg,\"&lt;\").replace(/>/mg,\"&gt;\").replace(/\\\"/mg,\"&quot;\");\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n// Converts all HTML entities to their character equivalents\nexports.entityDecode = function(s) {\n\tvar converter = String.fromCodePoint || String.fromCharCode,\n\t\te = s.substr(1,s.length-2); // Strip the & and the ;\n\tif(e.charAt(0) === \"#\") {\n\t\tif(e.charAt(1) === \"x\" || e.charAt(1) === \"X\") {\n\t\t\treturn converter(parseInt(e.substr(2),16));\t\n\t\t} else {\n\t\t\treturn converter(parseInt(e.substr(1),10));\n\t\t}\n\t} else {\n\t\tvar c = $tw.config.htmlEntities[e];\n\t\tif(c) {\n\t\t\treturn converter(c);\n\t\t} else {\n\t\t\treturn s; // Couldn't convert it as an entity, just return it raw\n\t\t}\n\t}\n};\n\nexports.unescapeLineBreaks = function(s) {\n\treturn s.replace(/\\\\n/mg,\"\\n\").replace(/\\\\b/mg,\" \").replace(/\\\\s/mg,\"\\\\\").replace(/\\r/mg,\"\");\n};\n\n/*\n * Returns an escape sequence for given character. 
Uses \\x for characters <=\n * 0xFF to save space, \\u for the rest.\n *\n * The code needs to be in sync with th code template in the compilation\n * function for \"action\" nodes.\n */\n// Copied from peg.js, thanks to David Majda\nexports.escape = function(ch) {\n\tvar charCode = ch.charCodeAt(0);\n\tif(charCode <= 0xFF) {\n\t\treturn '\\\\x' + $tw.utils.pad(charCode.toString(16).toUpperCase());\n\t} else {\n\t\treturn '\\\\u' + $tw.utils.pad(charCode.toString(16).toUpperCase(),4);\n\t}\n};\n\n// Turns a string into a legal JavaScript string\n// Copied from peg.js, thanks to David Majda\nexports.stringify = function(s) {\n\t/*\n\t* ECMA-262, 5th ed., 7.8.4: All characters may appear literally in a string\n\t* literal except for the closing quote character, backslash, carriage return,\n\t* line separator, paragraph separator, and line feed. Any character may\n\t* appear in the form of an escape sequence.\n\t*\n\t* For portability, we also escape all non-ASCII characters.\n\t*/\n\treturn (s || \"\")\n\t\t.replace(/\\\\/g, '\\\\\\\\')            // backslash\n\t\t.replace(/\"/g, '\\\\\"')              // double quote character\n\t\t.replace(/'/g, \"\\\\'\")              // single quote character\n\t\t.replace(/\\r/g, '\\\\r')             // carriage return\n\t\t.replace(/\\n/g, '\\\\n')             // line feed\n\t\t.replace(/[\\x80-\\uFFFF]/g, exports.escape); // non-ASCII characters\n};\n\n/*\nEscape the RegExp special characters with a preceding backslash\n*/\nexports.escapeRegExp = function(s) {\n    return s.replace(/[\\-\\/\\\\\\^\\$\\*\\+\\?\\.\\(\\)\\|\\[\\]\\{\\}]/g, '\\\\$&');\n};\n\n// Checks whether a link target is external, i.e. not a tiddler title\nexports.isLinkExternal = function(to) {\n\tvar externalRegExp = /^(?:file|http|https|mailto|ftp|irc|news|data|skype):[^\\s<>{}\\[\\]`|\"\\\\^]+(?:\\/|\\b)/i;\n\treturn externalRegExp.test(to);\n};\n\nexports.nextTick = function(fn) {\n/*global window: false */\n\tif(typeof process === \"undefined\") {\n\t\t// Apparently it would be faster to use postMessage - http://dbaron.org/log/20100309-faster-timeouts\n\t\twindow.setTimeout(fn,4);\n\t} else {\n\t\tprocess.nextTick(fn);\n\t}\n};\n\n/*\nConvert a hyphenated CSS property name into a camel case one\n*/\nexports.unHyphenateCss = function(propName) {\n\treturn propName.replace(/-([a-z])/gi, function(match0,match1) {\n\t\treturn match1.toUpperCase();\n\t});\n};\n\n/*\nConvert a camelcase CSS property name into a dashed one (\"backgroundColor\" --> \"background-color\")\n*/\nexports.hyphenateCss = function(propName) {\n\treturn propName.replace(/([A-Z])/g, function(match0,match1) {\n\t\treturn \"-\" + match1.toLowerCase();\n\t});\n};\n\n/*\nParse a text reference of one of these forms:\n* title\n* !!field\n* title!!field\n* title##index\n* etc\nReturns an object with the following fields, all optional:\n* title: tiddler title\n* field: tiddler field name\n* index: JSON property index\n*/\nexports.parseTextReference = function(textRef) {\n\t// Separate out the title, field name and/or JSON indices\n\tvar reTextRef = /(?:(.*?)!!(.+))|(?:(.*?)##(.+))|(.*)/mg,\n\t\tmatch = reTextRef.exec(textRef),\n\t\tresult = {};\n\tif(match && reTextRef.lastIndex === textRef.length) {\n\t\t// Return the parts\n\t\tif(match[1]) {\n\t\t\tresult.title = match[1];\n\t\t}\n\t\tif(match[2]) {\n\t\t\tresult.field = match[2];\n\t\t}\n\t\tif(match[3]) {\n\t\t\tresult.title = match[3];\n\t\t}\n\t\tif(match[4]) {\n\t\t\tresult.index = match[4];\n\t\t}\n\t\tif(match[5]) {\n\t\t\tresult.title = 
match[5];\n\t\t}\n\t} else {\n\t\t// If we couldn't parse it\n\t\tresult.title = textRef\n\t}\n\treturn result;\n};\n\n/*\nChecks whether a string is a valid fieldname\n*/\nexports.isValidFieldName = function(name) {\n\tif(!name || typeof name !== \"string\") {\n\t\treturn false;\n\t}\n\tname = name.toLowerCase().trim();\n\tvar fieldValidatorRegEx = /^[a-z0-9\\-\\._]+$/mg;\n\treturn fieldValidatorRegEx.test(name);\n};\n\n/*\nExtract the version number from the meta tag or from the boot file\n*/\n\n// Browser version\nexports.extractVersionInfo = function() {\n\tif($tw.packageInfo) {\n\t\treturn $tw.packageInfo.version;\n\t} else {\n\t\tvar metatags = document.getElementsByTagName(\"meta\");\n\t\tfor(var t=0; t<metatags.length; t++) {\n\t\t\tvar m = metatags[t];\n\t\t\tif(m.name === \"tiddlywiki-version\") {\n\t\t\t\treturn m.content;\n\t\t\t}\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nGet the animation duration in ms\n*/\nexports.getAnimationDuration = function() {\n\treturn parseInt($tw.wiki.getTiddlerText(\"$:/config/AnimationDuration\",\"400\"),10);\n};\n\n/*\nHash a string to a number\nDerived from http://stackoverflow.com/a/15710692\n*/\nexports.hashString = function(str) {\n\treturn str.split(\"\").reduce(function(a,b) {\n\t\ta = ((a << 5) - a) + b.charCodeAt(0);\n\t\treturn a & a;\n\t},0);\n};\n\n/*\nDecode a base64 string\n*/\nexports.base64Decode = function(string64) {\n\tif($tw.browser) {\n\t\t// TODO\n\t\tthrow \"$tw.utils.base64Decode() doesn't work in the browser\";\n\t} else {\n\t\treturn (new Buffer(string64,\"base64\")).toString();\n\t}\n};\n\n/*\nConvert a hashmap into a tiddler dictionary format sequence of name:value pairs\n*/\nexports.makeTiddlerDictionary = function(data) {\n\tvar output = [];\n\tfor(var name in data) {\n\t\toutput.push(name + \": \" + data[name]);\n\t}\n\treturn output.join(\"\\n\");\n};\n\n/*\nHigh resolution microsecond timer for profiling\n*/\nexports.timer = function(base) {\n\tvar m;\n\tif($tw.node) {\n\t\tvar r = process.hrtime();\t\t\n\t\tm =  r[0] * 1e3 + (r[1] / 1e6);\n\t} else if(window.performance) {\n\t\tm = performance.now();\n\t} else {\n\t\tm = Date.now();\n\t}\n\tif(typeof base !== \"undefined\") {\n\t\tm = m - base;\n\t}\n\treturn m;\n};\n\n/*\nConvert text and content type to a data URI\n*/\nexports.makeDataUri = function(text,type) {\n\ttype = type || \"text/vnd.tiddlywiki\";\n\tvar typeInfo = $tw.config.contentTypeInfo[type] || $tw.config.contentTypeInfo[\"text/plain\"],\n\t\tisBase64 = typeInfo.encoding === \"base64\",\n\t\tparts = [];\n\tparts.push(\"data:\");\n\tparts.push(type);\n\tparts.push(isBase64 ? \";base64\" : \"\");\n\tparts.push(\",\");\n\tparts.push(isBase64 ? text : encodeURIComponent(text));\n\treturn parts.join(\"\");\n};\n\n/*\nUseful for finding out the fully escaped CSS selector equivalent to a given tag. For example:\n\n$tw.utils.tagToCssSelector(\"$:/tags/Stylesheet\") --> tc-tagged-\\%24\\%3A\\%2Ftags\\%2FStylesheet\n*/\nexports.tagToCssSelector = function(tagName) {\n\treturn \"tc-tagged-\" + encodeURIComponent(tagName).replace(/[!\"#$%&'()*+,\\-./:;<=>?@[\\\\\\]^`{\\|}~,]/mg,function(c) {\n\t\treturn \"\\\\\" + c;\n\t});\n};\n\n\n/*\nIE does not have sign function\n*/\nexports.sign = Math.sign || function(x) {\n\tx = +x; // convert to a number\n\tif (x === 0 || isNaN(x)) {\n\t\treturn x;\n\t}\n\treturn x > 0 ? 
1 : -1;\n};\n\n/*\nIE does not have an endsWith function\n*/\nexports.strEndsWith = function(str,ending,position) {\n\tif(str.endsWith) {\n\t\treturn str.endsWith(ending,position);\n\t} else {\n\t\tif (typeof position !== 'number' || !isFinite(position) || Math.floor(position) !== position || position > str.length) {\n\t\t\tposition = str.length;\n\t\t}\n\t\tposition -= str.length;\n\t\tvar lastIndex = str.indexOf(ending, position);\n\t\treturn lastIndex !== -1 && lastIndex === position;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/utils.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/widgets/action-deletefield.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-deletefield.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to delete fields of a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DeleteFieldWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDeleteFieldWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDeleteFieldWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nDeleteFieldWidget.prototype.execute = function() {\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.actionField = this.getAttribute(\"$field\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nDeleteFieldWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$tiddler\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nDeleteFieldWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar self = this,\n\t\ttiddler = this.wiki.getTiddler(self.actionTiddler),\n\t\tremoveFields = {};\n\tif(this.actionField) {\n\t\tremoveFields[this.actionField] = undefined;\n\t}\n\tif(tiddler) {\n\t\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\t\tif(name.charAt(0) !== \"$\" && name !== \"title\") {\n\t\t\t\tremoveFields[name] = undefined;\n\t\t\t}\n\t\t});\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getModificationFields(),tiddler,removeFields,this.wiki.getCreationFields()));\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-deletefield\"] = DeleteFieldWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-deletefield.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-deletetiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-deletetiddler.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to delete a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DeleteTiddlerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDeleteTiddlerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDeleteTiddlerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nDeleteTiddlerWidget.prototype.execute = function() {\n\tthis.actionFilter = this.getAttribute(\"$filter\");\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nDeleteTiddlerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$filter\"] || changedAttributes[\"$tiddler\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nDeleteTiddlerWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar tiddlers = [];\n\tif(this.actionFilter) {\n\t\ttiddlers = this.wiki.filterTiddlers(this.actionFilter,this);\n\t}\n\tif(this.actionTiddler) {\n\t\ttiddlers.push(this.actionTiddler);\n\t}\n\tfor(var t=0; t<tiddlers.length; t++) {\n\t\tthis.wiki.deleteTiddler(tiddlers[t]);\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-deletetiddler\"] = DeleteTiddlerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-deletetiddler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-listops.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to apply list operations to any tiddler field (defaults to the 'list' field of the current tiddler)\n\n\\*/\n(function() {\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\nvar ActionListopsWidget = function(parseTreeNode, options) {\n\tthis.initialise(parseTreeNode, options);\n};\n/**\n * Inherit from the base widget class\n */\nActionListopsWidget.prototype = new Widget();\n/**\n * Render this widget into the DOM\n */\nActionListopsWidget.prototype.render = function(parent, nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n/**\n * Compute the internal state of the widget\n */\nActionListopsWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.target = this.getAttribute(\"$tiddler\", this.getVariable(\n\t\t\"currentTiddler\"));\n\tthis.filter = this.getAttribute(\"$filter\");\n\tthis.subfilter = this.getAttribute(\"$subfilter\");\n\tthis.listField = this.getAttribute(\"$field\", \"list\");\n\tthis.listIndex = this.getAttribute(\"$index\");\n\tthis.filtertags = this.getAttribute(\"$tags\");\n};\n/**\n * \tRefresh the widget by ensuring our attributes are up to date\n */\nActionListopsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.$tiddler || changedAttributes.$filter ||\n\t\tchangedAttributes.$subfilter || changedAttributes.$field ||\n\t\tchangedAttributes.$index || changedAttributes.$tags) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n/**\n * \tInvoke the action associated with this widget\n */\nActionListopsWidget.prototype.invokeAction = function(triggeringWidget,\n\tevent) {\n\t//Apply the specified filters to the lists\n\tvar field = this.listField,\n\t\tindex,\n\t\ttype = \"!!\",\n\t\tlist = this.listField;\n\tif(this.listIndex) {\n\t\tfield = undefined;\n\t\tindex = this.listIndex;\n\t\ttype = \"##\";\n\t\tlist = this.listIndex;\n\t}\n\tif(this.filter) {\n\t\tthis.wiki.setText(this.target, field, index, $tw.utils.stringifyList(\n\t\t\tthis.wiki\n\t\t\t.filterTiddlers(this.filter, this)));\n\t}\n\tif(this.subfilter) {\n\t\tvar subfilter = \"[list[\" + this.target + type + list + \"]] \" + this.subfilter;\n\t\tthis.wiki.setText(this.target, field, index, $tw.utils.stringifyList(\n\t\t\tthis.wiki\n\t\t\t.filterTiddlers(subfilter, this)));\n\t}\n\tif(this.filtertags) {\n\t\tvar tagfilter = \"[list[\" + this.target + \"!!tags]] \" + this.filtertags;\n\t\tthis.wiki.setText(this.target, \"tags\", undefined, $tw.utils.stringifyList(\n\t\t\tthis.wiki.filterTiddlers(tagfilter, this)));\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-listops\"] = ActionListopsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-listops.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-navigate.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-navigate.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to navigate to a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar NavigateWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nNavigateWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nNavigateWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nNavigateWidget.prototype.execute = function() {\n\tthis.actionTo = this.getAttribute(\"$to\");\n\tthis.actionScroll = this.getAttribute(\"$scroll\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nNavigateWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$to\"] || changedAttributes[\"$scroll\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nNavigateWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar bounds = triggeringWidget && triggeringWidget.getBoundingClientRect && triggeringWidget.getBoundingClientRect(),\n\t\tsuppressNavigation = event.metaKey || event.ctrlKey || (event.button === 1);\n\tif(this.actionScroll === \"yes\") {\n\t\tsuppressNavigation = false;\n\t} else if(this.actionScroll === \"no\") {\n\t\tsuppressNavigation = true;\n\t}\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.actionTo === undefined ? this.getVariable(\"currentTiddler\") : this.actionTo,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: triggeringWidget,\n\t\tnavigateFromClientRect: bounds && { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: suppressNavigation\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-navigate\"] = NavigateWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-navigate.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-sendmessage.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-sendmessage.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to send a message\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SendMessageWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSendMessageWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSendMessageWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nSendMessageWidget.prototype.execute = function() {\n\tthis.actionMessage = this.getAttribute(\"$message\");\n\tthis.actionParam = this.getAttribute(\"$param\");\n\tthis.actionName = this.getAttribute(\"$name\");\n\tthis.actionValue = this.getAttribute(\"$value\",\"\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nSendMessageWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(Object.keys(changedAttributes).length) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nSendMessageWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\t// Get the string parameter\n\tvar param = this.actionParam;\n\t// Assemble the attributes as a hashmap\n\tvar paramObject = Object.create(null);\n\tvar count = 0;\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tparamObject[name] = attribute;\n\t\t\tcount++;\n\t\t}\n\t});\n\t// Add name/value pair if present\n\tif(this.actionName) {\n\t\tparamObject[this.actionName] = this.actionValue;\n\t}\n\t// Dispatch the message\n\tthis.dispatchEvent({\n\t\ttype: this.actionMessage,\n\t\tparam: param,\n\t\tparamObject: paramObject,\n\t\ttiddlerTitle: this.getVariable(\"currentTiddler\"),\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\")\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-sendmessage\"] = SendMessageWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-sendmessage.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-setfield.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-setfield.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to set a single field or index on a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SetFieldWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSetFieldWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSetFieldWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nSetFieldWidget.prototype.execute = function() {\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.actionField = this.getAttribute(\"$field\");\n\tthis.actionIndex = this.getAttribute(\"$index\");\n\tthis.actionValue = this.getAttribute(\"$value\");\n\tthis.actionTimestamp = this.getAttribute(\"$timestamp\",\"yes\") === \"yes\";\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nSetFieldWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$tiddler\"] || changedAttributes[\"$field\"] || changedAttributes[\"$index\"] || changedAttributes[\"$value\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nSetFieldWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar self = this,\n\t\toptions = {};\n\toptions.suppressTimestamp = !this.actionTimestamp;\n\tif((typeof this.actionField == \"string\") || (typeof this.actionIndex == \"string\")  || (typeof this.actionValue == \"string\")) {\n\t\tthis.wiki.setText(this.actionTiddler,this.actionField,this.actionIndex,this.actionValue,options);\n\t}\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tself.wiki.setText(self.actionTiddler,name,undefined,attribute,options);\n\t\t}\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-setfield\"] = SetFieldWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-setfield.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/browse.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/browse.js\ntype: application/javascript\nmodule-type: widget\n\nBrowse widget for browsing for files to import\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar BrowseWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nBrowseWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nBrowseWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"input\");\n\tdomNode.setAttribute(\"type\",\"file\");\n\tif(this.browseMultiple) {\n\t\tdomNode.setAttribute(\"multiple\",\"multiple\");\n\t}\n\tif(this.tooltip) {\n\t\tdomNode.setAttribute(\"title\",this.tooltip);\n\t}\n\t// Nw.js supports \"nwsaveas\" to force a \"save as\" dialogue that allows a new or existing file to be selected\n\tif(this.nwsaveas) {\n\t\tdomNode.setAttribute(\"nwsaveas\",this.nwsaveas);\n\t}\n\t// Nw.js supports \"webkitdirectory\" to allow a directory to be selected\n\tif(this.webkitdirectory) {\n\t\tdomNode.setAttribute(\"webkitdirectory\",this.webkitdirectory);\n\t}\n\t// Add a click event handler\n\tdomNode.addEventListener(\"change\",function (event) {\n\t\tif(self.message) {\n\t\t\tself.dispatchEvent({type: self.message, param: self.param, files: event.target.files});\n\t\t} else {\n\t\t\tself.wiki.readFiles(event.target.files,function(tiddlerFieldsArray) {\n\t\t\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t\t\t});\n\t\t}\n\t\treturn false;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nBrowseWidget.prototype.execute = function() {\n\tthis.browseMultiple = this.getAttribute(\"multiple\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis.nwsaveas = this.getAttribute(\"nwsaveas\");\n\tthis.webkitdirectory = this.getAttribute(\"webkitdirectory\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nBrowseWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.browse = BrowseWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/browse.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/button.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/button.js\ntype: application/javascript\nmodule-type: widget\n\nButton widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ButtonWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nButtonWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nButtonWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar tag = \"button\";\n\tif(this.buttonTag && $tw.config.htmlUnsafeElements.indexOf(this.buttonTag) === -1) {\n\t\ttag = this.buttonTag;\n\t}\n\tvar domNode = this.document.createElement(tag);\n\t// Assign classes\n\tvar classes = this[\"class\"].split(\" \") || [],\n\t\tisPoppedUp = this.popup && this.isPoppedUp();\n\tif(this.selectedClass) {\n\t\tif(this.set && this.setTo && this.isSelected()) {\n\t\t\t$tw.utils.pushTop(classes,this.selectedClass.split(\" \"));\n\t\t}\n\t\tif(isPoppedUp) {\n\t\t\t$tw.utils.pushTop(classes,this.selectedClass.split(\" \"));\n\t\t}\n\t}\n\tif(isPoppedUp) {\n\t\t$tw.utils.pushTop(classes,\"tc-popup-handle\");\n\t}\n\tdomNode.className = classes.join(\" \");\n\t// Assign other attributes\n\tif(this.style) {\n\t\tdomNode.setAttribute(\"style\",this.style);\n\t}\n\tif(this.tooltip) {\n\t\tdomNode.setAttribute(\"title\",this.tooltip);\n\t}\n\tif(this[\"aria-label\"]) {\n\t\tdomNode.setAttribute(\"aria-label\",this[\"aria-label\"]);\n\t}\n\t// Add a click event handler\n\tdomNode.addEventListener(\"click\",function (event) {\n\t\tvar handled = false;\n\t\tif(self.invokeActions(this,event)) {\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.to) {\n\t\t\tself.navigateTo(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.message) {\n\t\t\tself.dispatchMessage(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.popup) {\n\t\t\tself.triggerPopup(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.set) {\n\t\t\tself.setTiddler();\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.actions) {\n\t\t\tself.invokeActionString(self.actions,self,event);\n\t\t}\n\t\tif(handled) {\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t}\n\t\treturn handled;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nWe don't allow actions to propagate because we trigger actions ourselves\n*/\nButtonWidget.prototype.allowActionPropagation = function() {\n\treturn false;\n};\n\nButtonWidget.prototype.getBoundingClientRect = function() {\n\treturn this.domNodes[0].getBoundingClientRect();\n};\n\nButtonWidget.prototype.isSelected = function() {\n    return this.wiki.getTextReference(this.set,this.defaultSetValue,this.getVariable(\"currentTiddler\")) === this.setTo;\n};\n\nButtonWidget.prototype.isPoppedUp = function() {\n\tvar tiddler = this.wiki.getTiddler(this.popup);\n\tvar result = tiddler && tiddler.fields.text ? 
$tw.popup.readPopupState(tiddler.fields.text) : false;\n\treturn result;\n};\n\nButtonWidget.prototype.navigateTo = function(event) {\n\tvar bounds = this.getBoundingClientRect();\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.to,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: this,\n\t\tnavigateFromClientRect: { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: event.metaKey || event.ctrlKey || (event.button === 1)\n\t});\n};\n\nButtonWidget.prototype.dispatchMessage = function(event) {\n\tthis.dispatchEvent({type: this.message, param: this.param, tiddlerTitle: this.getVariable(\"currentTiddler\")});\n};\n\nButtonWidget.prototype.triggerPopup = function(event) {\n\t$tw.popup.triggerPopup({\n\t\tdomNode: this.domNodes[0],\n\t\ttitle: this.popup,\n\t\twiki: this.wiki\n\t});\n};\n\nButtonWidget.prototype.setTiddler = function() {\n\tthis.wiki.setTextReference(this.set,this.setTo,this.getVariable(\"currentTiddler\"));\n};\n\n/*\nCompute the internal state of the widget\n*/\nButtonWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.actions = this.getAttribute(\"actions\");\n\tthis.to = this.getAttribute(\"to\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.set = this.getAttribute(\"set\");\n\tthis.setTo = this.getAttribute(\"setTo\");\n\tthis.popup = this.getAttribute(\"popup\");\n\tthis.hover = this.getAttribute(\"hover\");\n\tthis[\"class\"] = this.getAttribute(\"class\",\"\");\n\tthis[\"aria-label\"] = this.getAttribute(\"aria-label\");\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis.style = this.getAttribute(\"style\");\n\tthis.selectedClass = this.getAttribute(\"selectedClass\");\n\tthis.defaultSetValue = this.getAttribute(\"default\",\"\");\n\tthis.buttonTag = this.getAttribute(\"tag\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nButtonWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedAttributes.message || changedAttributes.param || changedAttributes.set || changedAttributes.setTo || changedAttributes.popup || changedAttributes.hover || changedAttributes[\"class\"] || changedAttributes.selectedClass || changedAttributes.style || (this.set && changedTiddlers[this.set]) || (this.popup && changedTiddlers[this.popup])) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.button = ButtonWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/button.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/checkbox.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/checkbox.js\ntype: application/javascript\nmodule-type: widget\n\nCheckbox widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CheckboxWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCheckboxWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCheckboxWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create our elements\n\tthis.labelDomNode = this.document.createElement(\"label\");\n\tthis.labelDomNode.setAttribute(\"class\",this.checkboxClass);\n\tthis.inputDomNode = this.document.createElement(\"input\");\n\tthis.inputDomNode.setAttribute(\"type\",\"checkbox\");\n\tif(this.getValue()) {\n\t\tthis.inputDomNode.setAttribute(\"checked\",\"true\");\n\t}\n\tthis.labelDomNode.appendChild(this.inputDomNode);\n\tthis.spanDomNode = this.document.createElement(\"span\");\n\tthis.labelDomNode.appendChild(this.spanDomNode);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(this.inputDomNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(this.labelDomNode,nextSibling);\n\tthis.renderChildren(this.spanDomNode,null);\n\tthis.domNodes.push(this.labelDomNode);\n};\n\nCheckboxWidget.prototype.getValue = function() {\n\tvar tiddler = this.wiki.getTiddler(this.checkboxTitle);\n\tif(tiddler) {\n\t\tif(this.checkboxTag) {\n\t\t\tif(this.checkboxInvertTag) {\n\t\t\t\treturn !tiddler.hasTag(this.checkboxTag);\n\t\t\t} else {\n\t\t\t\treturn tiddler.hasTag(this.checkboxTag);\n\t\t\t}\n\t\t}\n\t\tif(this.checkboxField) {\n\t\t\tvar value = tiddler.fields[this.checkboxField] || this.checkboxDefault || \"\";\n\t\t\tif(value === this.checkboxChecked) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tif(value === this.checkboxUnchecked) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif(this.checkboxTag) {\n\t\t\treturn false;\n\t\t}\n\t\tif(this.checkboxField) {\n\t\t\tif(this.checkboxDefault === this.checkboxChecked) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tif(this.checkboxDefault === this.checkboxUnchecked) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\nCheckboxWidget.prototype.handleChangeEvent = function(event) {\n\tvar checked = this.inputDomNode.checked,\n\t\ttiddler = this.wiki.getTiddler(this.checkboxTitle),\n\t\tfallbackFields = {text: \"\"},\n\t\tnewFields = {title: this.checkboxTitle},\n\t\thasChanged = false,\n\t\ttagCheck = false,\n\t\thasTag = tiddler && tiddler.hasTag(this.checkboxTag);\n\tif(this.checkboxTag && this.checkboxInvertTag === \"yes\") {\n\t\ttagCheck = hasTag === checked;\n\t} else {\n\t\ttagCheck = hasTag !== checked;\n\t}\n\t// Set the tag if specified\n\tif(this.checkboxTag && (!tiddler || tagCheck)) {\n\t\tnewFields.tags = tiddler ? 
(tiddler.fields.tags || []).slice(0) : [];\n\t\tvar pos = newFields.tags.indexOf(this.checkboxTag);\n\t\tif(pos !== -1) {\n\t\t\tnewFields.tags.splice(pos,1);\n\t\t}\n\t\tif(this.checkboxInvertTag === \"yes\" && !checked) {\n\t\t\tnewFields.tags.push(this.checkboxTag);\n\t\t} else if(this.checkboxInvertTag !== \"yes\" && checked) {\n\t\t\tnewFields.tags.push(this.checkboxTag);\n\t\t}\n\t\thasChanged = true;\n\t}\n\t// Set the field if specified\n\tif(this.checkboxField) {\n\t\tvar value = checked ? this.checkboxChecked : this.checkboxUnchecked;\n\t\tif(!tiddler || tiddler.fields[this.checkboxField] !== value) {\n\t\t\tnewFields[this.checkboxField] = value;\n\t\t\thasChanged = true;\n\t\t}\n\t}\n\tif(hasChanged) {\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getCreationFields(),fallbackFields,tiddler,newFields,this.wiki.getModificationFields()));\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nCheckboxWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.checkboxTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.checkboxTag = this.getAttribute(\"tag\");\n\tthis.checkboxField = this.getAttribute(\"field\");\n\tthis.checkboxChecked = this.getAttribute(\"checked\");\n\tthis.checkboxUnchecked = this.getAttribute(\"unchecked\");\n\tthis.checkboxDefault = this.getAttribute(\"default\");\n\tthis.checkboxClass = this.getAttribute(\"class\",\"\");\n\tthis.checkboxInvertTag = this.getAttribute(\"invertTag\",\"\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCheckboxWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.tag || changedAttributes.invertTag || changedAttributes.field || changedAttributes.checked || changedAttributes.unchecked || changedAttributes[\"default\"] || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false;\n\t\tif(changedTiddlers[this.checkboxTitle]) {\n\t\t\tthis.inputDomNode.checked = this.getValue();\n\t\t\trefreshed = true;\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\nexports.checkbox = CheckboxWidget;\n\n})();",
            "title": "$:/core/modules/widgets/checkbox.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/codeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/codeblock.js\ntype: application/javascript\nmodule-type: widget\n\nCode block node widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CodeBlockWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCodeBlockWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCodeBlockWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar codeNode = this.document.createElement(\"code\"),\n\t\tdomNode = this.document.createElement(\"pre\");\n\tcodeNode.appendChild(this.document.createTextNode(this.getAttribute(\"code\")));\n\tdomNode.appendChild(codeNode);\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.domNodes.push(domNode);\n\tif(this.postRender) {\n\t\tthis.postRender();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nCodeBlockWidget.prototype.execute = function() {\n\tthis.language = this.getAttribute(\"language\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCodeBlockWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.codeblock = CodeBlockWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/codeblock.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/count.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/count.js\ntype: application/javascript\nmodule-type: widget\n\nCount widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CountWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCountWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCountWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.currentCount);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nCountWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.filter = this.getAttribute(\"filter\");\n\t// Execute the filter\n\tif(this.filter) {\n\t\tthis.currentCount = this.wiki.filterTiddlers(this.filter,this).length;\n\t} else {\n\t\tthis.currentCount = undefined;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCountWidget.prototype.refresh = function(changedTiddlers) {\n\t// Re-execute the filter to get the count\n\tthis.computeAttributes();\n\tvar oldCount = this.currentCount;\n\tthis.execute();\n\tif(this.currentCount !== oldCount) {\n\t\t// Regenerate and rerender the widget and replace the existing DOM node\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n\n};\n\nexports.count = CountWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/count.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/dropzone.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/dropzone.js\ntype: application/javascript\nmodule-type: widget\n\nDropzone widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DropZoneWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDropZoneWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDropZoneWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"div\");\n\tdomNode.className = \"tc-dropzone\";\n\t// Add event handlers\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"dragenter\", handlerObject: this, handlerMethod: \"handleDragEnterEvent\"},\n\t\t{name: \"dragover\", handlerObject: this, handlerMethod: \"handleDragOverEvent\"},\n\t\t{name: \"dragleave\", handlerObject: this, handlerMethod: \"handleDragLeaveEvent\"},\n\t\t{name: \"drop\", handlerObject: this, handlerMethod: \"handleDropEvent\"},\n\t\t{name: \"paste\", handlerObject: this, handlerMethod: \"handlePasteEvent\"}\n\t]);\n\tdomNode.addEventListener(\"click\",function (event) {\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nDropZoneWidget.prototype.enterDrag = function() {\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\t// We count enter/leave events\n\tthis.dragEnterCount = (this.dragEnterCount || 0) + 1;\n\t// If we're entering for the first time we need to apply highlighting\n\tif(this.dragEnterCount === 1) {\n\t\t$tw.utils.addClass(this.domNodes[0],\"tc-dragover\");\n\t}\n};\n\nDropZoneWidget.prototype.leaveDrag = function() {\n\t// Reduce the enter count\n\tthis.dragEnterCount = (this.dragEnterCount || 0) - 1;\n\t// Remove highlighting if we're leaving externally\n\tif(this.dragEnterCount <= 0) {\n\t\t$tw.utils.removeClass(this.domNodes[0],\"tc-dragover\");\n\t}\n};\n\nDropZoneWidget.prototype.handleDragEnterEvent  = function(event) {\n\tthis.enterDrag();\n\t// Tell the browser that we're ready to handle the drop\n\tevent.preventDefault();\n\t// Tell the browser not to ripple the drag up to any parent drop handlers\n\tevent.stopPropagation();\n};\n\nDropZoneWidget.prototype.handleDragOverEvent  = function(event) {\n\t// Check for being over a TEXTAREA or INPUT\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) !== -1) {\n\t\treturn false;\n\t}\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\t// Tell the browser that we're still interested in the drop\n\tevent.preventDefault();\n\tevent.dataTransfer.dropEffect = \"copy\"; // Explicitly show this is a copy\n};\n\nDropZoneWidget.prototype.handleDragLeaveEvent  = function(event) {\n\tthis.leaveDrag();\n};\n\nDropZoneWidget.prototype.handleDropEvent  = function(event) {\n\tthis.leaveDrag();\n\t// Check for being over a TEXTAREA or INPUT\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) !== -1) {\n\t\treturn false;\n\t}\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\tvar 
self = this,\n\t\tdataTransfer = event.dataTransfer;\n\t// Reset the enter count\n\tthis.dragEnterCount = 0;\n\t// Remove highlighting\n\t$tw.utils.removeClass(this.domNodes[0],\"tc-dragover\");\n\t// Import any files in the drop\n\tvar numFiles = this.wiki.readFiles(dataTransfer.files,function(tiddlerFieldsArray) {\n\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t});\n\t// Try to import the various data types we understand\n\tif(numFiles === 0) {\n\t\tthis.importData(dataTransfer);\n\t}\n\t// Tell the browser that we handled the drop\n\tevent.preventDefault();\n\t// Stop the drop ripple up to any parent handlers\n\tevent.stopPropagation();\n};\n\nDropZoneWidget.prototype.importData = function(dataTransfer) {\n\t// Try each provided data type in turn\n\tfor(var t=0; t<this.importDataTypes.length; t++) {\n\t\tif(!$tw.browser.isIE || this.importDataTypes[t].IECompatible) {\n\t\t\t// Get the data\n\t\t\tvar dataType = this.importDataTypes[t];\n\t\t\t\tvar data = dataTransfer.getData(dataType.type);\n\t\t\t// Import the tiddlers in the data\n\t\t\tif(data !== \"\" && data !== null) {\n\t\t\t\tif($tw.log.IMPORT) {\n\t\t\t\t\tconsole.log(\"Importing data type '\" + dataType.type + \"', data: '\" + data + \"'\")\n\t\t\t\t}\n\t\t\t\tvar tiddlerFields = dataType.convertToFields(data);\n\t\t\t\tif(!tiddlerFields.title) {\n\t\t\t\t\ttiddlerFields.title = this.wiki.generateNewTitle(\"Untitled\");\n\t\t\t\t}\n\t\t\t\tthis.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify([tiddlerFields])});\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n};\n\nDropZoneWidget.prototype.importDataTypes = [\n\t{type: \"text/vnd.tiddler\", IECompatible: false, convertToFields: function(data) {\n\t\treturn JSON.parse(data);\n\t}},\n\t{type: \"URL\", IECompatible: true, convertToFields: function(data) {\n\t\t// Check for tiddler data URI\n\t\tvar match = decodeURIComponent(data).match(/^data\\:text\\/vnd\\.tiddler,(.*)/i);\n\t\tif(match) {\n\t\t\treturn JSON.parse(match[1]);\n\t\t} else {\n\t\t\treturn { // As URL string\n\t\t\t\ttext: data\n\t\t\t};\n\t\t}\n\t}},\n\t{type: \"text/x-moz-url\", IECompatible: false, convertToFields: function(data) {\n\t\t// Check for tiddler data URI\n\t\tvar match = decodeURIComponent(data).match(/^data\\:text\\/vnd\\.tiddler,(.*)/i);\n\t\tif(match) {\n\t\t\treturn JSON.parse(match[1]);\n\t\t} else {\n\t\t\treturn { // As URL string\n\t\t\t\ttext: data\n\t\t\t};\n\t\t}\n\t}},\n\t{type: \"text/html\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"text/plain\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"Text\", IECompatible: true, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"text/uri-list\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}}\n];\n\nDropZoneWidget.prototype.handlePasteEvent  = function(event) {\n\t// Let the browser handle it if we're in a textarea or input box\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) == -1) {\n\t\tvar self = this,\n\t\t\titems = event.clipboardData.items;\n\t\t// Enumerate the clipboard items\n\t\tfor(var t = 0; t<items.length; t++) {\n\t\t\tvar item = items[t];\n\t\t\tif(item.kind === \"file\") {\n\t\t\t\t// Import any files\n\t\t\t\tthis.wiki.readFile(item.getAsFile(),function(tiddlerFieldsArray) {\n\t\t\t\t\tself.dispatchEvent({type: 
\"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t\t\t\t});\n\t\t\t} else if(item.kind === \"string\") {\n\t\t\t\t// Create tiddlers from string items\n\t\t\t\tvar type = item.type;\n\t\t\t\titem.getAsString(function(str) {\n\t\t\t\t\tvar tiddlerFields = {\n\t\t\t\t\t\ttitle: self.wiki.generateNewTitle(\"Untitled\"),\n\t\t\t\t\t\ttext: str,\n\t\t\t\t\t\ttype: type\n\t\t\t\t\t};\n\t\t\t\t\tif($tw.log.IMPORT) {\n\t\t\t\t\t\tconsole.log(\"Importing string '\" + str + \"', type: '\" + type + \"'\");\n\t\t\t\t\t}\n\t\t\t\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify([tiddlerFields])});\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t\t// Tell the browser that we've handled the paste\n\t\tevent.stopPropagation();\n\t\tevent.preventDefault();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nDropZoneWidget.prototype.execute = function() {\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nDropZoneWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.dropzone = DropZoneWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/dropzone.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-binary.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-binary.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-binary widget; placeholder for editing binary tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar BINARY_WARNING_MESSAGE = \"$:/core/ui/BinaryWarning\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditBinaryWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditBinaryWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditBinaryWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditBinaryWidget.prototype.execute = function() {\n\t// Construct the child widgets\n\tthis.makeChildWidgets([{\n\t\ttype: \"transclude\",\n\t\tattributes: {\n\t\t\ttiddler: {type: \"string\", value: BINARY_WARNING_MESSAGE}\n\t\t}\n\t}]);\n};\n\n/*\nRefresh by refreshing our child widget\n*/\nEditBinaryWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports[\"edit-binary\"] = EditBinaryWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-binary.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-bitmap.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-bitmap.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-bitmap widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Default image sizes\nvar DEFAULT_IMAGE_WIDTH = 600,\n\tDEFAULT_IMAGE_HEIGHT = 370;\n\n// Configuration tiddlers\nvar LINE_WIDTH_TITLE = \"$:/config/BitmapEditor/LineWidth\",\n\tLINE_COLOUR_TITLE = \"$:/config/BitmapEditor/Colour\",\n\tLINE_OPACITY_TITLE = \"$:/config/BitmapEditor/Opacity\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditBitmapWidget = function(parseTreeNode,options) {\n\t// Initialise the editor operations if they've not been done already\n\tif(!this.editorOperations) {\n\t\tEditBitmapWidget.prototype.editorOperations = {};\n\t\t$tw.modules.applyMethods(\"bitmapeditoroperation\",this.editorOperations);\n\t}\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditBitmapWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditBitmapWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create the wrapper for the toolbar and render its content\n\tthis.toolbarNode = this.document.createElement(\"div\");\n\tthis.toolbarNode.className = \"tc-editor-toolbar\";\n\tparent.insertBefore(this.toolbarNode,nextSibling);\n\tthis.domNodes.push(this.toolbarNode);\n\t// Create the on-screen canvas\n\tthis.canvasDomNode = $tw.utils.domMaker(\"canvas\",{\n\t\tdocument: this.document,\n\t\t\"class\":\"tc-edit-bitmapeditor\",\n\t\teventListeners: [{\n\t\t\tname: \"touchstart\", handlerObject: this, handlerMethod: \"handleTouchStartEvent\"\n\t\t},{\n\t\t\tname: \"touchmove\", handlerObject: this, handlerMethod: \"handleTouchMoveEvent\"\n\t\t},{\n\t\t\tname: \"touchend\", handlerObject: this, handlerMethod: \"handleTouchEndEvent\"\n\t\t},{\n\t\t\tname: \"mousedown\", handlerObject: this, handlerMethod: \"handleMouseDownEvent\"\n\t\t},{\n\t\t\tname: \"mousemove\", handlerObject: this, handlerMethod: \"handleMouseMoveEvent\"\n\t\t},{\n\t\t\tname: \"mouseup\", handlerObject: this, handlerMethod: \"handleMouseUpEvent\"\n\t\t}]\n\t});\n\t// Set the width and height variables\n\tthis.setVariable(\"tv-bitmap-editor-width\",this.canvasDomNode.width + \"px\");\n\tthis.setVariable(\"tv-bitmap-editor-height\",this.canvasDomNode.height + \"px\");\n\t// Render toolbar child widgets\n\tthis.renderChildren(this.toolbarNode,null);\n\t// // Insert the elements into the DOM\n\tparent.insertBefore(this.canvasDomNode,nextSibling);\n\tthis.domNodes.push(this.canvasDomNode);\n\t// Load the image into the canvas\n\tif($tw.browser) {\n\t\tthis.loadCanvas();\n\t}\n\t// Add widget message listeners\n\tthis.addEventListeners([\n\t\t{type: \"tm-edit-bitmap-operation\", handler: \"handleEditBitmapOperationMessage\"}\n\t]);\n};\n\n/*\nHandle an edit bitmap operation message from the toolbar\n*/\nEditBitmapWidget.prototype.handleEditBitmapOperationMessage = function(event) {\n\t// Invoke the handler\n\tvar handler = this.editorOperations[event.param];\n\tif(handler) {\n\t\thandler.call(this,event);\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditBitmapWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.editTitle = 
this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nJust refresh the toolbar\n*/\nEditBitmapWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nSet the bitmap size variables and refresh the toolbar\n*/\nEditBitmapWidget.prototype.refreshToolbar = function() {\n\t// Set the width and height variables\n\tthis.setVariable(\"tv-bitmap-editor-width\",this.canvasDomNode.width + \"px\");\n\tthis.setVariable(\"tv-bitmap-editor-height\",this.canvasDomNode.height + \"px\");\n\t// Refresh each of our child widgets\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\tchildWidget.refreshSelf();\n\t});\n};\n\nEditBitmapWidget.prototype.loadCanvas = function() {\n\tvar tiddler = this.wiki.getTiddler(this.editTitle),\n\t\tcurrImage = new Image();\n\t// Set up event handlers for loading the image\n\tvar self = this;\n\tcurrImage.onload = function() {\n\t\t// Copy the image to the on-screen canvas\n\t\tself.initCanvas(self.canvasDomNode,currImage.width,currImage.height,currImage);\n\t\t// And also copy the current bitmap to the off-screen canvas\n\t\tself.currCanvas = self.document.createElement(\"canvas\");\n\t\tself.initCanvas(self.currCanvas,currImage.width,currImage.height,currImage);\n\t\t// Set the width and height input boxes\n\t\tself.refreshToolbar();\n\t};\n\tcurrImage.onerror = function() {\n\t\t// Set the on-screen canvas size and clear it\n\t\tself.initCanvas(self.canvasDomNode,DEFAULT_IMAGE_WIDTH,DEFAULT_IMAGE_HEIGHT);\n\t\t// Set the off-screen canvas size and clear it\n\t\tself.currCanvas = self.document.createElement(\"canvas\");\n\t\tself.initCanvas(self.currCanvas,DEFAULT_IMAGE_WIDTH,DEFAULT_IMAGE_HEIGHT);\n\t\t// Set the width and height input boxes\n\t\tself.refreshToolbar();\n\t};\n\t// Get the current bitmap into an image object\n\tcurrImage.src = \"data:\" + tiddler.fields.type + \";base64,\" + tiddler.fields.text;\n};\n\nEditBitmapWidget.prototype.initCanvas = function(canvas,width,height,image) {\n\tcanvas.width = width;\n\tcanvas.height = height;\n\tvar ctx = canvas.getContext(\"2d\");\n\tif(image) {\n\t\tctx.drawImage(image,0,0);\n\t} else {\n\t\tctx.fillStyle = \"#fff\";\n\t\tctx.fillRect(0,0,canvas.width,canvas.height);\n\t}\n};\n\n/*\n** Change the size of the canvas, preserving the current image\n*/\nEditBitmapWidget.prototype.changeCanvasSize = function(newWidth,newHeight) {\n\t// Create and size a new canvas\n\tvar newCanvas = this.document.createElement(\"canvas\");\n\tthis.initCanvas(newCanvas,newWidth,newHeight);\n\t// Copy the old image\n\tvar ctx = newCanvas.getContext(\"2d\");\n\tctx.drawImage(this.currCanvas,0,0);\n\t// Set the new canvas as the current one\n\tthis.currCanvas = newCanvas;\n\t// Set the size of the onscreen canvas\n\tthis.canvasDomNode.width = newWidth;\n\tthis.canvasDomNode.height = newHeight;\n\t// Paint the onscreen canvas with the offscreen canvas\n\tctx = this.canvasDomNode.getContext(\"2d\");\n\tctx.drawImage(this.currCanvas,0,0);\n};\n\nEditBitmapWidget.prototype.handleTouchStartEvent = function(event) {\n\tthis.brushDown = true;\n\tthis.strokeStart(event.touches[0].clientX,event.touches[0].clientY);\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleTouchMoveEvent = function(event) {\n\tif(this.brushDown) 
{\n\t\tthis.strokeMove(event.touches[0].clientX,event.touches[0].clientY);\n\t}\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleTouchEndEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.brushDown = false;\n\t\tthis.strokeEnd();\n\t}\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleMouseDownEvent = function(event) {\n\tthis.strokeStart(event.clientX,event.clientY);\n\tthis.brushDown = true;\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleMouseMoveEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.strokeMove(event.clientX,event.clientY);\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn false;\n\t}\n\treturn true;\n};\n\nEditBitmapWidget.prototype.handleMouseUpEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.brushDown = false;\n\t\tthis.strokeEnd();\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn false;\n\t}\n\treturn true;\n};\n\nEditBitmapWidget.prototype.adjustCoordinates = function(x,y) {\n\tvar canvasRect = this.canvasDomNode.getBoundingClientRect(),\n\t\tscale = this.canvasDomNode.width/canvasRect.width;\n\treturn {x: (x - canvasRect.left) * scale, y: (y - canvasRect.top) * scale};\n};\n\nEditBitmapWidget.prototype.strokeStart = function(x,y) {\n\t// Start off a new stroke\n\tthis.stroke = [this.adjustCoordinates(x,y)];\n};\n\nEditBitmapWidget.prototype.strokeMove = function(x,y) {\n\tvar ctx = this.canvasDomNode.getContext(\"2d\"),\n\t\tt;\n\t// Add the new position to the end of the stroke\n\tthis.stroke.push(this.adjustCoordinates(x,y));\n\t// Redraw the previous image\n\tctx.drawImage(this.currCanvas,0,0);\n\t// Render the stroke\n\tctx.globalAlpha = parseFloat(this.wiki.getTiddlerText(LINE_OPACITY_TITLE,\"1.0\"));\n\tctx.strokeStyle = this.wiki.getTiddlerText(LINE_COLOUR_TITLE,\"#ff0\");\n\tctx.lineWidth = parseFloat(this.wiki.getTiddlerText(LINE_WIDTH_TITLE,\"3\"));\n\tctx.lineCap = \"round\";\n\tctx.lineJoin = \"round\";\n\tctx.beginPath();\n\tctx.moveTo(this.stroke[0].x,this.stroke[0].y);\n\tfor(t=1; t<this.stroke.length-1; t++) {\n\t\tvar s1 = this.stroke[t],\n\t\t\ts2 = this.stroke[t-1],\n\t\t\ttx = (s1.x + s2.x)/2,\n\t\t\tty = (s1.y + s2.y)/2;\n\t\tctx.quadraticCurveTo(s2.x,s2.y,tx,ty);\n\t}\n\tctx.stroke();\n};\n\nEditBitmapWidget.prototype.strokeEnd = function() {\n\t// Copy the bitmap to the off-screen canvas\n\tvar ctx = this.currCanvas.getContext(\"2d\");\n\tctx.drawImage(this.canvasDomNode,0,0);\n\t// Save the image into the tiddler\n\tthis.saveChanges();\n};\n\nEditBitmapWidget.prototype.saveChanges = function() {\n\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\tif(tiddler) {\n\t\t// data URIs look like \"data:<type>;base64,<text>\"\n\t\tvar dataURL = this.canvasDomNode.toDataURL(tiddler.fields.type),\n\t\t\tposColon = dataURL.indexOf(\":\"),\n\t\t\tposSemiColon = dataURL.indexOf(\";\"),\n\t\t\tposComma = dataURL.indexOf(\",\"),\n\t\t\ttype = dataURL.substring(posColon+1,posSemiColon),\n\t\t\ttext = dataURL.substring(posComma+1);\n\t\tvar update = {type: type, text: text};\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getModificationFields(),tiddler,update,this.wiki.getCreationFields()));\n\t}\n};\n\nexports[\"edit-bitmap\"] = EditBitmapWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-bitmap.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-shortcut.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-shortcut.js\ntype: application/javascript\nmodule-type: widget\n\nWidget to display an editable keyboard shortcut\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditShortcutWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditShortcutWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditShortcutWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.inputNode = this.document.createElement(\"input\");\n\t// Assign classes\n\tif(this.shortcutClass) {\n\t\tthis.inputNode.className = this.shortcutClass;\t\t\n\t}\n\t// Assign other attributes\n\tif(this.shortcutStyle) {\n\t\tthis.inputNode.setAttribute(\"style\",this.shortcutStyle);\n\t}\n\tif(this.shortcutTooltip) {\n\t\tthis.inputNode.setAttribute(\"title\",this.shortcutTooltip);\n\t}\n\tif(this.shortcutPlaceholder) {\n\t\tthis.inputNode.setAttribute(\"placeholder\",this.shortcutPlaceholder);\n\t}\n\tif(this.shortcutAriaLabel) {\n\t\tthis.inputNode.setAttribute(\"aria-label\",this.shortcutAriaLabel);\n\t}\n\t// Assign the current shortcut\n\tthis.updateInputNode();\n\t// Add event handlers\n\t$tw.utils.addEventListeners(this.inputNode,[\n\t\t{name: \"keydown\", handlerObject: this, handlerMethod: \"handleKeydownEvent\"}\n\t]);\n\t// Link into the DOM\n\tparent.insertBefore(this.inputNode,nextSibling);\n\tthis.domNodes.push(this.inputNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditShortcutWidget.prototype.execute = function() {\n\tthis.shortcutTiddler = this.getAttribute(\"tiddler\");\n\tthis.shortcutField = this.getAttribute(\"field\");\n\tthis.shortcutIndex = this.getAttribute(\"index\");\n\tthis.shortcutPlaceholder = this.getAttribute(\"placeholder\");\n\tthis.shortcutDefault = this.getAttribute(\"default\",\"\");\n\tthis.shortcutClass = this.getAttribute(\"class\");\n\tthis.shortcutStyle = this.getAttribute(\"style\");\n\tthis.shortcutTooltip = this.getAttribute(\"tooltip\");\n\tthis.shortcutAriaLabel = this.getAttribute(\"aria-label\");\n};\n\n/*\nUpdate the value of the input node\n*/\nEditShortcutWidget.prototype.updateInputNode = function() {\n\tif(this.shortcutField) {\n\t\tvar tiddler = this.wiki.getTiddler(this.shortcutTiddler);\n\t\tif(tiddler && $tw.utils.hop(tiddler.fields,this.shortcutField)) {\n\t\t\tthis.inputNode.value = tiddler.getFieldString(this.shortcutField);\n\t\t} else {\n\t\t\tthis.inputNode.value = this.shortcutDefault;\n\t\t}\n\t} else if(this.shortcutIndex) {\n\t\tthis.inputNode.value = this.wiki.extractTiddlerDataItem(this.shortcutTiddler,this.shortcutIndex,this.shortcutDefault);\n\t} else {\n\t\tthis.inputNode.value = this.wiki.getTiddlerText(this.shortcutTiddler,this.shortcutDefault);\n\t}\n};\n\n/*\nHandle a dom \"keydown\" event\n*/\nEditShortcutWidget.prototype.handleKeydownEvent = function(event) {\n\t// Ignore shift, ctrl, meta, alt\n\tif(event.keyCode && $tw.keyboardManager.getModifierKeys().indexOf(event.keyCode) === -1) {\n\t\t// Get the shortcut text representation\n\t\tvar value = $tw.keyboardManager.getPrintableShortcuts([{\n\t\t\tctrlKey: event.ctrlKey,\n\t\t\tshiftKey: event.shiftKey,\n\t\t\taltKey: event.altKey,\n\t\t\tmetaKey: event.metaKey,\n\t\t\tkeyCode: 
event.keyCode\n\t\t}]);\n\t\tif(value.length > 0) {\n\t\t\tthis.wiki.setText(this.shortcutTiddler,this.shortcutField,this.shortcutIndex,value[0]);\n\t\t}\n\t\t// Ignore the keydown if it was already handled\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn true;\t\t\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget needed re-rendering\n*/\nEditShortcutWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes.placeholder || changedAttributes[\"default\"] || changedAttributes[\"class\"] || changedAttributes.style || changedAttributes.tooltip || changedAttributes[\"aria-label\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else if(changedTiddlers[this.shortcutTiddler]) {\n\t\tthis.updateInputNode();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports[\"edit-shortcut\"] = EditShortcutWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-shortcut.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-text.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-text.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-text widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar editTextWidgetFactory = require(\"$:/core/modules/editor/factory.js\").editTextWidgetFactory,\n\tFramedEngine = require(\"$:/core/modules/editor/engines/framed.js\").FramedEngine,\n\tSimpleEngine = require(\"$:/core/modules/editor/engines/simple.js\").SimpleEngine;\n\nexports[\"edit-text\"] = editTextWidgetFactory(FramedEngine,SimpleEngine);\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-text.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit.js\ntype: application/javascript\nmodule-type: widget\n\nEdit widget is a meta-widget chooses the appropriate actual editting widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n// Mappings from content type to editor type are stored in tiddlers with this prefix\nvar EDITOR_MAPPING_PREFIX = \"$:/config/EditorTypeMappings/\";\n\n/*\nCompute the internal state of the widget\n*/\nEditWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.editTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.editField = this.getAttribute(\"field\",\"text\");\n\tthis.editIndex = this.getAttribute(\"index\");\n\tthis.editClass = this.getAttribute(\"class\");\n\tthis.editPlaceholder = this.getAttribute(\"placeholder\");\n\t// Choose the appropriate edit widget\n\tthis.editorType = this.getEditorType();\n\t// Make the child widgets\n\tthis.makeChildWidgets([{\n\t\ttype: \"edit-\" + this.editorType,\n\t\tattributes: {\n\t\t\ttiddler: {type: \"string\", value: this.editTitle},\n\t\t\tfield: {type: \"string\", value: this.editField},\n\t\t\tindex: {type: \"string\", value: this.editIndex},\n\t\t\t\"class\": {type: \"string\", value: this.editClass},\n\t\t\t\"placeholder\": {type: \"string\", value: this.editPlaceholder}\n\t\t},\n\t\tchildren: this.parseTreeNode.children\n\t}]);\n};\n\nEditWidget.prototype.getEditorType = function() {\n\t// Get the content type of the thing we're editing\n\tvar type;\n\tif(this.editField === \"text\") {\n\t\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\t\tif(tiddler) {\n\t\t\ttype = tiddler.fields.type;\n\t\t}\n\t}\n\ttype = type || \"text/vnd.tiddlywiki\";\n\tvar editorType = this.wiki.getTiddlerText(EDITOR_MAPPING_PREFIX + type);\n\tif(!editorType) {\n\t\tvar typeInfo = $tw.config.contentTypeInfo[type];\n\t\tif(typeInfo && typeInfo.encoding === \"base64\") {\n\t\t\teditorType = \"binary\";\n\t\t} else {\n\t\t\teditorType = \"text\";\n\t\t}\n\t}\n\treturn editorType;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEditWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// Refresh if an attribute has changed, or the type associated with the target tiddler has changed\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || (changedTiddlers[this.editTitle] && this.getEditorType() !== this.editorType)) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.edit = EditWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/element.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/element.js\ntype: application/javascript\nmodule-type: widget\n\nElement widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ElementWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nElementWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nElementWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Neuter blacklisted elements\n\tvar tag = this.parseTreeNode.tag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"safe-\" + tag;\n\t}\n\tvar domNode = this.document.createElementNS(this.namespace,tag);\n\tthis.assignAttributes(domNode,{excludeEventAttributes: true});\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nElementWidget.prototype.execute = function() {\n\t// Select the namespace for the tag\n\tvar tagNamespaces = {\n\t\t\tsvg: \"http://www.w3.org/2000/svg\",\n\t\t\tmath: \"http://www.w3.org/1998/Math/MathML\",\n\t\t\tbody: \"http://www.w3.org/1999/xhtml\"\n\t\t};\n\tthis.namespace = tagNamespaces[this.parseTreeNode.tag];\n\tif(this.namespace) {\n\t\tthis.setVariable(\"namespace\",this.namespace);\n\t} else {\n\t\tthis.namespace = this.getVariable(\"namespace\",{defaultValue: \"http://www.w3.org/1999/xhtml\"});\n\t}\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nElementWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\thasChangedAttributes = $tw.utils.count(changedAttributes) > 0;\n\tif(hasChangedAttributes) {\n\t\t// Update our attributes\n\t\tthis.assignAttributes(this.domNodes[0],{excludeEventAttributes: true});\n\t}\n\treturn this.refreshChildren(changedTiddlers) || hasChangedAttributes;\n};\n\nexports.element = ElementWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/element.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/encrypt.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/encrypt.js\ntype: application/javascript\nmodule-type: widget\n\nEncrypt widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EncryptWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEncryptWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEncryptWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.encryptedText);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEncryptWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.filter = this.getAttribute(\"filter\",\"[!is[system]]\");\n\t// Encrypt the filtered tiddlers\n\tvar tiddlers = this.wiki.filterTiddlers(this.filter),\n\t\tjson = {},\n\t\tself = this;\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.wiki.getTiddler(title),\n\t\t\tjsonTiddler = {};\n\t\tfor(var f in tiddler.fields) {\n\t\t\tjsonTiddler[f] = tiddler.getFieldString(f);\n\t\t}\n\t\tjson[title] = jsonTiddler;\n\t});\n\tthis.encryptedText = $tw.utils.htmlEncode($tw.crypto.encrypt(JSON.stringify(json)));\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEncryptWidget.prototype.refresh = function(changedTiddlers) {\n\t// We don't need to worry about refreshing because the encrypt widget isn't for interactive use\n\treturn false;\n};\n\nexports.encrypt = EncryptWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/encrypt.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/entity.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/entity.js\ntype: application/javascript\nmodule-type: widget\n\nHTML entity widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EntityWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEntityWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEntityWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tvar entityString = this.getAttribute(\"entity\",this.parseTreeNode.entity || \"\"),\n\t\ttextNode = this.document.createTextNode($tw.utils.entityDecode(entityString));\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEntityWidget.prototype.execute = function() {\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEntityWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.entity) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.entity = EntityWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/entity.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/fieldmangler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/fieldmangler.js\ntype: application/javascript\nmodule-type: widget\n\nField mangler widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar FieldManglerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-remove-field\", handler: \"handleRemoveFieldEvent\"},\n\t\t{type: \"tm-add-field\", handler: \"handleAddFieldEvent\"},\n\t\t{type: \"tm-remove-tag\", handler: \"handleRemoveTagEvent\"},\n\t\t{type: \"tm-add-tag\", handler: \"handleAddTagEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nFieldManglerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nFieldManglerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nFieldManglerWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.mangleTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nFieldManglerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nFieldManglerWidget.prototype.handleRemoveFieldEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle),\n\t\tdeletion = {};\n\tdeletion[event.param] = undefined;\n\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,deletion));\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleAddFieldEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle),\n\t\taddition = this.wiki.getModificationFields(),\n\t\thadInvalidFieldName = false,\n\t\taddField = function(name,value) {\n\t\t\tvar trimmedName = name.toLowerCase().trim();\n\t\t\tif(!$tw.utils.isValidFieldName(trimmedName)) {\n\t\t\t\tif(!hadInvalidFieldName) {\n\t\t\t\t\talert($tw.language.getString(\n\t\t\t\t\t\t\"InvalidFieldName\",\n\t\t\t\t\t\t{variables:\n\t\t\t\t\t\t\t{fieldName: trimmedName}\n\t\t\t\t\t\t}\n\t\t\t\t\t));\n\t\t\t\t\thadInvalidFieldName = true;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif(!value && tiddler) {\n\t\t\t\t\tvalue = tiddler.fields[trimmedName];\n\t\t\t\t}\n\t\t\t\taddition[trimmedName] = value || \"\";\n\t\t\t}\n\t\t\treturn;\n\t\t};\n\taddition.title = this.mangleTitle;\n\tif(typeof event.param === \"string\") {\n\t\taddField(event.param,\"\");\n\t}\n\tif(typeof event.paramObject === \"object\") {\n\t\tfor(var name in event.paramObject) {\n\t\t\taddField(name,event.paramObject[name]);\n\t\t}\n\t}\n\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,addition));\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleRemoveTagEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle);\n\tif(tiddler && tiddler.fields.tags) {\n\t\tvar p = tiddler.fields.tags.indexOf(event.param);\n\t\tif(p !== -1) {\n\t\t\tvar modification = this.wiki.getModificationFields();\n\t\t\tmodification.tags = (tiddler.fields.tags || 
[]).slice(0);\n\t\t\tmodification.tags.splice(p,1);\n\t\t\tif(modification.tags.length === 0) {\n\t\t\t\tmodification.tags = undefined;\n\t\t\t}\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,modification));\n\t\t}\n\t}\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleAddTagEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle);\n\tif(tiddler && typeof event.param === \"string\") {\n\t\tvar tag = event.param.trim();\n\t\tif(tag !== \"\") {\n\t\t\tvar modification = this.wiki.getModificationFields();\n\t\t\tmodification.tags = (tiddler.fields.tags || []).slice(0);\n\t\t\t$tw.utils.pushTop(modification.tags,tag);\n\t\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,modification));\t\t\t\n\t\t}\n\t} else if(typeof event.param === \"string\" && event.param.trim() !== \"\" && this.mangleTitle.trim() !== \"\") {\n\t\tvar tag = [];\n\t\ttag.push(event.param.trim());\n\t\tthis.wiki.addTiddler({title: this.mangleTitle, tags: tag});\t\t\n\t}\n\treturn true;\n};\n\nexports.fieldmangler = FieldManglerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/fieldmangler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/fields.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/fields.js\ntype: application/javascript\nmodule-type: widget\n\nFields widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar FieldsWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nFieldsWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nFieldsWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.text);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nFieldsWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.tiddlerTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.template = this.getAttribute(\"template\");\n\tthis.exclude = this.getAttribute(\"exclude\");\n\tthis.stripTitlePrefix = this.getAttribute(\"stripTitlePrefix\",\"no\") === \"yes\";\n\t// Get the value to display\n\tvar tiddler = this.wiki.getTiddler(this.tiddlerTitle);\n\t// Get the exclusion list\n\tvar exclude;\n\tif(this.exclude) {\n\t\texclude = this.exclude.split(\" \");\n\t} else {\n\t\texclude = [\"text\"]; \n\t}\n\t// Compose the template\n\tvar text = [];\n\tif(this.template && tiddler) {\n\t\tvar fields = [];\n\t\tfor(var fieldName in tiddler.fields) {\n\t\t\tif(exclude.indexOf(fieldName) === -1) {\n\t\t\t\tfields.push(fieldName);\n\t\t\t}\n\t\t}\n\t\tfields.sort();\n\t\tfor(var f=0; f<fields.length; f++) {\n\t\t\tfieldName = fields[f];\n\t\t\tif(exclude.indexOf(fieldName) === -1) {\n\t\t\t\tvar row = this.template,\n\t\t\t\t\tvalue = tiddler.getFieldString(fieldName);\n\t\t\t\tif(this.stripTitlePrefix && fieldName === \"title\") {\n\t\t\t\t\tvar reStrip = /^\\{[^\\}]+\\}(.+)/mg,\n\t\t\t\t\t\treMatch = reStrip.exec(value);\n\t\t\t\t\tif(reMatch) {\n\t\t\t\t\t\tvalue = reMatch[1];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trow = row.replace(\"$name$\",fieldName);\n\t\t\t\trow = row.replace(\"$value$\",value);\n\t\t\t\trow = row.replace(\"$encoded_value$\",$tw.utils.htmlEncode(value));\n\t\t\t\ttext.push(row);\n\t\t\t}\n\t\t}\n\t}\n\tthis.text = text.join(\"\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nFieldsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.template || changedAttributes.exclude || changedAttributes.stripTitlePrefix || changedTiddlers[this.tiddlerTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.fields = FieldsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/fields.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/image.js\ntype: application/javascript\nmodule-type: widget\n\nThe image widget displays an image referenced with an external URI or with a local tiddler title.\n\n```\n<$image src=\"TiddlerTitle\" width=\"320\" height=\"400\" class=\"classnames\">\n```\n\nThe image source can be the title of an existing tiddler or the URL of an external image.\n\nExternal images always generate an HTML `<img>` tag.\n\nTiddlers that have a _canonical_uri field generate an HTML `<img>` tag with the src attribute containing the URI.\n\nTiddlers that contain image data generate an HTML `<img>` tag with the src attribute containing a base64 representation of the image.\n\nTiddlers that contain wikitext could be rendered to a DIV of the usual size of a tiddler, and then transformed to the size requested.\n\nThe width and height attributes are interpreted as a number of pixels, and do not need to include the \"px\" suffix.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ImageWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nImageWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nImageWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\t// Determine what type of image it is\n\tvar tag = \"img\", src = \"\",\n\t\ttiddler = this.wiki.getTiddler(this.imageSource);\n\tif(!tiddler) {\n\t\t// The source isn't the title of a tiddler, so we'll assume it's a URL\n\t\tsrc = this.getVariable(\"tv-get-export-image-link\",{params: [{name: \"src\",value: this.imageSource}],defaultValue: this.imageSource});\n\t} else {\n\t\t// Check if it is an image tiddler\n\t\tif(this.wiki.isImageTiddler(this.imageSource)) {\n\t\t\tvar type = tiddler.fields.type,\n\t\t\t\ttext = tiddler.fields.text,\n\t\t\t\t_canonical_uri = tiddler.fields._canonical_uri;\n\t\t\t// If the tiddler has body text then it doesn't need to be lazily loaded\n\t\t\tif(text) {\n\t\t\t\t// Render the appropriate element for the image type\n\t\t\t\tswitch(type) {\n\t\t\t\t\tcase \"application/pdf\":\n\t\t\t\t\t\ttag = \"embed\";\n\t\t\t\t\t\tsrc = \"data:application/pdf;base64,\" + text;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"image/svg+xml\":\n\t\t\t\t\t\tsrc = \"data:image/svg+xml,\" + encodeURIComponent(text);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tsrc = \"data:\" + type + \";base64,\" + text;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t} else if(_canonical_uri) {\n\t\t\t\tswitch(type) {\n\t\t\t\t\tcase \"application/pdf\":\n\t\t\t\t\t\ttag = \"embed\";\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"image/svg+xml\":\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\t\n\t\t\t} else {\n\t\t\t\t// Just trigger loading of the tiddler\n\t\t\t\tthis.wiki.getTiddlerText(this.imageSource);\n\t\t\t}\n\t\t}\n\t}\n\t// Create the element and assign the attributes\n\tvar domNode = this.document.createElement(tag);\n\tdomNode.setAttribute(\"src\",src);\n\tif(this.imageClass) {\n\t\tdomNode.setAttribute(\"class\",this.imageClass);\t\t\n\t}\n\tif(this.imageWidth) {\n\t\tdomNode.setAttribute(\"width\",this.imageWidth);\n\t}\n\tif(this.imageHeight) 
{\n\t\tdomNode.setAttribute(\"height\",this.imageHeight);\n\t}\n\tif(this.imageTooltip) {\n\t\tdomNode.setAttribute(\"title\",this.imageTooltip);\t\t\n\t}\n\tif(this.imageAlt) {\n\t\tdomNode.setAttribute(\"alt\",this.imageAlt);\t\t\n\t}\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nImageWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.imageSource = this.getAttribute(\"source\");\n\tthis.imageWidth = this.getAttribute(\"width\");\n\tthis.imageHeight = this.getAttribute(\"height\");\n\tthis.imageClass = this.getAttribute(\"class\");\n\tthis.imageTooltip = this.getAttribute(\"tooltip\");\n\tthis.imageAlt = this.getAttribute(\"alt\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nImageWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.source || changedAttributes.width || changedAttributes.height || changedAttributes[\"class\"] || changedAttributes.tooltip || changedTiddlers[this.imageSource]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\t\n\t}\n};\n\nexports.image = ImageWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/image.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
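The image widget above selects an HTML tag and `src` value depending on whether the source tiddler holds inline image data, only a `_canonical_uri`, or is treated as an external URL. A minimal standalone sketch of that decision follows; `resolveImageSource` is an illustrative helper, not part of the core API, and it works on a plain fields object rather than widget state.

```
// Sketch: mirrors the tag/src selection in image.js above.
function resolveImageSource(fields) {
	// fields: {type, text, _canonical_uri} from an image tiddler,
	// or null/undefined when the source is treated as an external URL.
	if(!fields) {
		return {tag: "img", src: null}; // caller falls back to the raw URL
	}
	if(fields.text) {
		switch(fields.type) {
			case "application/pdf":
				return {tag: "embed", src: "data:application/pdf;base64," + fields.text};
			case "image/svg+xml":
				return {tag: "img", src: "data:image/svg+xml," + encodeURIComponent(fields.text)};
			default:
				return {tag: "img", src: "data:" + fields.type + ";base64," + fields.text};
		}
	}
	if(fields._canonical_uri) {
		// Lazily loaded images point straight at the canonical URI
		return {tag: fields.type === "application/pdf" ? "embed" : "img", src: fields._canonical_uri};
	}
	// No body text and no canonical URI: nothing to display yet
	return {tag: "img", src: ""};
}

// Example: an inline PNG tiddler (dummy base64 payload)
var png = resolveImageSource({type: "image/png", text: "iVBORw0KGgo..."});
console.log(png.tag, png.src.indexOf("data:image/png;base64,") === 0); // img true
```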
        "$:/core/modules/widgets/importvariables.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/importvariables.js\ntype: application/javascript\nmodule-type: widget\n\nImport variable definitions from other tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ImportVariablesWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nImportVariablesWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nImportVariablesWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nImportVariablesWidget.prototype.execute = function(tiddlerList) {\n\tvar self = this;\n\t// Get our parameters\n\tthis.filter = this.getAttribute(\"filter\");\n\t// Compute the filter\n\tthis.tiddlerList = tiddlerList || this.wiki.filterTiddlers(this.filter,this);\n\t// Accumulate the <$set> widgets from each tiddler\n\tvar widgetStackStart,widgetStackEnd;\n\tfunction addWidgetNode(widgetNode) {\n\t\tif(widgetNode) {\n\t\t\tif(!widgetStackStart && !widgetStackEnd) {\n\t\t\t\twidgetStackStart = widgetNode;\n\t\t\t\twidgetStackEnd = widgetNode;\n\t\t\t} else {\n\t\t\t\twidgetStackEnd.children = [widgetNode];\n\t\t\t\twidgetStackEnd = widgetNode;\n\t\t\t}\n\t\t}\n\t}\n\t$tw.utils.each(this.tiddlerList,function(title) {\n\t\tvar parser = self.wiki.parseTiddler(title);\n\t\tif(parser) {\n\t\t\tvar parseTreeNode = parser.tree[0];\n\t\t\twhile(parseTreeNode && parseTreeNode.type === \"set\") {\n\t\t\t\taddWidgetNode({\n\t\t\t\t\ttype: \"set\",\n\t\t\t\t\tattributes: parseTreeNode.attributes,\n\t\t\t\t\tparams: parseTreeNode.params\n\t\t\t\t});\n\t\t\t\tparseTreeNode = parseTreeNode.children[0];\n\t\t\t}\n\t\t} \n\t});\n\t// Add our own children to the end of the pile\n\tvar parseTreeNodes;\n\tif(widgetStackStart && widgetStackEnd) {\n\t\tparseTreeNodes = [widgetStackStart];\n\t\twidgetStackEnd.children = this.parseTreeNode.children;\n\t} else {\n\t\tparseTreeNodes = this.parseTreeNode.children;\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nImportVariablesWidget.prototype.refresh = function(changedTiddlers) {\n\t// Recompute our attributes and the filter list\n\tvar changedAttributes = this.computeAttributes(),\n\t\ttiddlerList = this.wiki.filterTiddlers(this.getAttribute(\"filter\"),this);\n\t// Refresh if the filter has changed, or the list of tiddlers has changed, or any of the tiddlers in the list has changed\n\tfunction haveListedTiddlersChanged() {\n\t\tvar changed = false;\n\t\ttiddlerList.forEach(function(title) {\n\t\t\tif(changedTiddlers[title]) {\n\t\t\t\tchanged = true;\n\t\t\t}\n\t\t});\n\t\treturn changed;\n\t}\n\tif(changedAttributes.filter || !$tw.utils.isArrayEqual(this.tiddlerList,tiddlerList) || haveListedTiddlersChanged()) {\n\t\t// Compute the filter\n\t\tthis.removeChildDomNodes();\n\t\tthis.execute(tiddlerList);\n\t\tthis.renderChildren(this.parentDomNode,this.findNextSiblingDomNode());\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.importvariables = ImportVariablesWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/importvariables.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
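The importvariables widget walks each listed tiddler's parse tree, collecting the leading `set` nodes and chaining them so that later definitions nest inside earlier ones, with the widget's own children placed innermost. A hedged sketch of that chaining on plain objects (the function name and shapes are illustrative only):

```
// Sketch: chain a flat list of "set" parse nodes into one nested stack,
// the way ImportVariablesWidget.prototype.execute accumulates them.
function chainSetNodes(setNodes, innerChildren) {
	var start, end;
	setNodes.forEach(function(node) {
		var copy = {type: "set", attributes: node.attributes, params: node.params};
		if(!start) {
			start = copy;
			end = copy;
		} else {
			end.children = [copy];  // nest the new definition inside the previous one
			end = copy;
		}
	});
	if(start) {
		end.children = innerChildren;  // the widget's own children go innermost
		return [start];
	}
	return innerChildren;
}

// Example: two definitions wrapping a text node
var tree = chainSetNodes(
	[{attributes: {name: {type: "string", value: "greeting"}}, params: []},
	 {attributes: {name: {type: "string", value: "farewell"}}, params: []}],
	[{type: "text", text: "body"}]
);
console.log(JSON.stringify(tree, null, 2));
```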
        "$:/core/modules/widgets/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/keyboard.js\ntype: application/javascript\nmodule-type: widget\n\nKeyboard shortcut widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar KeyboardWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nKeyboardWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nKeyboardWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"div\");\n\t// Assign classes\n\tvar classes = (this[\"class\"] || \"\").split(\" \");\n\tclasses.push(\"tc-keyboard\");\n\tdomNode.className = classes.join(\" \");\n\t// Add a keyboard event handler\n\tdomNode.addEventListener(\"keydown\",function (event) {\n\t\tif($tw.keyboardManager.checkKeyDescriptors(event,self.keyInfoArray)) {\n\t\t\tself.invokeActions(self,event);\n\t\t\tif(self.actions) {\n\t\t\t\tself.invokeActionString(self.actions,self,event);\n\t\t\t}\n\t\t\tself.dispatchMessage(event);\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nKeyboardWidget.prototype.dispatchMessage = function(event) {\n\tthis.dispatchEvent({type: this.message, param: this.param, tiddlerTitle: this.getVariable(\"currentTiddler\")});\n};\n\n/*\nCompute the internal state of the widget\n*/\nKeyboardWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.actions = this.getAttribute(\"actions\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.key = this.getAttribute(\"key\");\n\tthis.keyInfoArray = $tw.keyboardManager.parseKeyDescriptors(this.key);\n\tthis[\"class\"] = this.getAttribute(\"class\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nKeyboardWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.message || changedAttributes.param || changedAttributes.key || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.keyboard = KeyboardWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/keyboard.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
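The keyboard widget wraps its children in a `div` and fires its actions and message when a keydown matches one of the parsed key descriptors. The real parsing and matching live in `$tw.keyboardManager`; the following is only a much-reduced stand-in to show the shape of the check.

```
// Sketch: simplified descriptor matching against a DOM keydown event.
// A descriptor here is {keyCode, ctrlKey, shiftKey, altKey, metaKey}.
function matchesKeyDescriptor(event, info) {
	return event.keyCode === info.keyCode &&
		!!event.ctrlKey === !!info.ctrlKey &&
		!!event.shiftKey === !!info.shiftKey &&
		!!event.altKey === !!info.altKey &&
		!!event.metaKey === !!info.metaKey;
}

function matchesAny(event, keyInfoArray) {
	return keyInfoArray.some(function(info) {
		return matchesKeyDescriptor(event, info);
	});
}

// Example: ctrl-Enter (keyCode 13)
console.log(matchesAny({keyCode: 13, ctrlKey: true}, [{keyCode: 13, ctrlKey: true}])); // true
```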
        "$:/core/modules/widgets/link.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/link.js\ntype: application/javascript\nmodule-type: widget\n\nLink widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\nvar MISSING_LINK_CONFIG_TITLE = \"$:/config/MissingLinks\";\n\nvar LinkWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nLinkWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nLinkWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Get the value of the tv-wikilinks configuration macro\n\tvar wikiLinksMacro = this.getVariable(\"tv-wikilinks\"),\n\t\tuseWikiLinks = wikiLinksMacro ? (wikiLinksMacro.trim() !== \"no\") : true,\n\t\tmissingLinksEnabled = !(this.hideMissingLinks && this.isMissing && !this.isShadow);\n\t// Render the link if required\n\tif(useWikiLinks && missingLinksEnabled) {\n\t\tthis.renderLink(parent,nextSibling);\n\t} else {\n\t\t// Just insert the link text\n\t\tvar domNode = this.document.createElement(\"span\");\n\t\tparent.insertBefore(domNode,nextSibling);\n\t\tthis.renderChildren(domNode,null);\n\t\tthis.domNodes.push(domNode);\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nLinkWidget.prototype.renderLink = function(parent,nextSibling) {\n\tvar self = this;\n\t// Sanitise the specified tag\n\tvar tag = this.linkTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"a\";\n\t}\n\t// Create our element\n\tvar domNode = this.document.createElement(tag);\n\t// Assign classes\n\tvar classes = [];\n\tif(this.linkClasses) {\n\t\tclasses.push(this.linkClasses);\n\t}\n\tclasses.push(\"tc-tiddlylink\");\n\tif(this.isShadow) {\n\t\tclasses.push(\"tc-tiddlylink-shadow\");\n\t}\n\tif(this.isMissing && !this.isShadow) {\n\t\tclasses.push(\"tc-tiddlylink-missing\");\n\t} else {\n\t\tif(!this.isMissing) {\n\t\t\tclasses.push(\"tc-tiddlylink-resolves\");\n\t\t}\n\t}\n\tdomNode.setAttribute(\"class\",classes.join(\" \"));\n\t// Set an href\n\tvar wikiLinkTemplateMacro = this.getVariable(\"tv-wikilink-template\"),\n\t\twikiLinkTemplate = wikiLinkTemplateMacro ? 
wikiLinkTemplateMacro.trim() : \"#$uri_encoded$\",\n\t\twikiLinkText = wikiLinkTemplate.replace(\"$uri_encoded$\",encodeURIComponent(this.to));\n\twikiLinkText = wikiLinkText.replace(\"$uri_doubleencoded$\",encodeURIComponent(encodeURIComponent(this.to)));\n\twikiLinkText = this.getVariable(\"tv-get-export-link\",{params: [{name: \"to\",value: this.to}],defaultValue: wikiLinkText});\n\tif(tag === \"a\") {\n\t\tdomNode.setAttribute(\"href\",wikiLinkText);\n\t}\n\tif(this.tabIndex) {\n\t\tdomNode.setAttribute(\"tabindex\",this.tabIndex);\n\t}\n\t// Set the tooltip\n\t// HACK: Performance issues with re-parsing the tooltip prevent us defaulting the tooltip to \"<$transclude field='tooltip'><$transclude field='title'/></$transclude>\"\n\tvar tooltipWikiText = this.tooltip || this.getVariable(\"tv-wikilink-tooltip\");\n\tif(tooltipWikiText) {\n\t\tvar tooltipText = this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",tooltipWikiText,{\n\t\t\t\tparseAsInline: true,\n\t\t\t\tvariables: {\n\t\t\t\t\tcurrentTiddler: this.to\n\t\t\t\t},\n\t\t\t\tparentWidget: this\n\t\t\t});\n\t\tdomNode.setAttribute(\"title\",tooltipText);\n\t}\n\tif(this[\"aria-label\"]) {\n\t\tdomNode.setAttribute(\"aria-label\",this[\"aria-label\"]);\n\t}\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"click\", handlerObject: this, handlerMethod: \"handleClickEvent\"},\n\t]);\n\tif(this.draggable === \"yes\") {\n\t\t$tw.utils.addEventListeners(domNode,[\n\t\t\t{name: \"dragstart\", handlerObject: this, handlerMethod: \"handleDragStartEvent\"},\n\t\t\t{name: \"dragend\", handlerObject: this, handlerMethod: \"handleDragEndEvent\"}\n\t\t]);\n\t}\n\t// Insert the link into the DOM and render any children\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nLinkWidget.prototype.handleClickEvent = function(event) {\n\t// Send the click on its way as a navigate event\n\tvar bounds = this.domNodes[0].getBoundingClientRect();\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.to,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: this,\n\t\tnavigateFromClientRect: { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: event.metaKey || event.ctrlKey || (event.button === 1)\n\t});\n\tif(this.domNodes[0].hasAttribute(\"href\")) {\n\t\tevent.preventDefault();\n\t}\n\tevent.stopPropagation();\n\treturn false;\n};\n\nLinkWidget.prototype.handleDragStartEvent = function(event) {\n\tif(event.target === this.domNodes[0]) {\n\t\tif(this.to) {\n\t\t\t$tw.dragInProgress = true;\n\t\t\t// Set the dragging class on the element being dragged\n\t\t\t$tw.utils.addClass(event.target,\"tc-tiddlylink-dragging\");\n\t\t\t// Create the drag image elements\n\t\t\tthis.dragImage = this.document.createElement(\"div\");\n\t\t\tthis.dragImage.className = \"tc-tiddler-dragger\";\n\t\t\tvar inner = this.document.createElement(\"div\");\n\t\t\tinner.className = \"tc-tiddler-dragger-inner\";\n\t\t\tinner.appendChild(this.document.createTextNode(this.to));\n\t\t\tthis.dragImage.appendChild(inner);\n\t\t\tthis.document.body.appendChild(this.dragImage);\n\t\t\t// Astoundingly, we need to cover the dragger up: http://www.kryogenix.org/code/browser/custom-drag-image.html\n\t\t\tvar cover = this.document.createElement(\"div\");\n\t\t\tcover.className = 
\"tc-tiddler-dragger-cover\";\n\t\t\tcover.style.left = (inner.offsetLeft - 16) + \"px\";\n\t\t\tcover.style.top = (inner.offsetTop - 16) + \"px\";\n\t\t\tcover.style.width = (inner.offsetWidth + 32) + \"px\";\n\t\t\tcover.style.height = (inner.offsetHeight + 32) + \"px\";\n\t\t\tthis.dragImage.appendChild(cover);\n\t\t\t// Set the data transfer properties\n\t\t\tvar dataTransfer = event.dataTransfer;\n\t\t\t// First the image\n\t\t\tdataTransfer.effectAllowed = \"copy\";\n\t\t\tif(dataTransfer.setDragImage) {\n\t\t\t\tdataTransfer.setDragImage(this.dragImage.firstChild,-16,-16);\n\t\t\t}\n\t\t\t// Then the data\n\t\t\tdataTransfer.clearData();\n\t\t\tvar jsonData = this.wiki.getTiddlerAsJson(this.to),\n\t\t\t\ttextData = this.wiki.getTiddlerText(this.to,\"\"),\n\t\t\t\ttitle = (new RegExp(\"^\" + $tw.config.textPrimitives.wikiLink + \"$\",\"mg\")).exec(this.to) ? this.to : \"[[\" + this.to + \"]]\";\n\t\t\t// IE doesn't like these content types\n\t\t\tif(!$tw.browser.isIE) {\n\t\t\t\tdataTransfer.setData(\"text/vnd.tiddler\",jsonData);\n\t\t\t\tdataTransfer.setData(\"text/plain\",title);\n\t\t\t\tdataTransfer.setData(\"text/x-moz-url\",\"data:text/vnd.tiddler,\" + encodeURIComponent(jsonData));\n\t\t\t}\n\t\t\tdataTransfer.setData(\"URL\",\"data:text/vnd.tiddler,\" + encodeURIComponent(jsonData));\n\t\t\tdataTransfer.setData(\"Text\",title);\n\t\t\tevent.stopPropagation();\n\t\t} else {\n\t\t\tevent.preventDefault();\n\t\t}\n\t}\n};\n\nLinkWidget.prototype.handleDragEndEvent = function(event) {\n\tif(event.target === this.domNodes[0]) {\n\t\t$tw.dragInProgress = false;\n\t\t// Remove the dragging class on the element being dragged\n\t\t$tw.utils.removeClass(event.target,\"tc-tiddlylink-dragging\");\n\t\t// Delete the drag image element\n\t\tif(this.dragImage) {\n\t\t\tthis.dragImage.parentNode.removeChild(this.dragImage);\n\t\t}\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nLinkWidget.prototype.execute = function() {\n\t// Pick up our attributes\n\tthis.to = this.getAttribute(\"to\",this.getVariable(\"currentTiddler\"));\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis[\"aria-label\"] = this.getAttribute(\"aria-label\");\n\tthis.linkClasses = this.getAttribute(\"class\");\n\tthis.tabIndex = this.getAttribute(\"tabindex\");\n\tthis.draggable = this.getAttribute(\"draggable\",\"yes\");\n\tthis.linkTag = this.getAttribute(\"tag\",\"a\");\n\t// Determine the link characteristics\n\tthis.isMissing = !this.wiki.tiddlerExists(this.to);\n\tthis.isShadow = this.wiki.isShadowTiddler(this.to);\n\tthis.hideMissingLinks = ($tw.wiki.getTiddlerText(MISSING_LINK_CONFIG_TITLE,\"yes\") === \"no\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nLinkWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedTiddlers[this.to] || changedAttributes[\"aria-label\"] || changedAttributes.tooltip || changedTiddlers[MISSING_LINK_CONFIG_TITLE]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.link = LinkWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/link.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
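In `renderLink` the href is built by substituting `$uri_encoded$` (and `$uri_doubleencoded$`) into the `tv-wikilink-template` variable, falling back to `#$uri_encoded$`. A small sketch of that substitution, assuming a plain title string and an optional template:

```
// Sketch: href construction as in LinkWidget.prototype.renderLink.
function buildWikiLinkHref(title, template) {
	var wikiLinkTemplate = (template || "").trim() || "#$uri_encoded$";
	return wikiLinkTemplate
		.replace("$uri_encoded$", encodeURIComponent(title))
		.replace("$uri_doubleencoded$", encodeURIComponent(encodeURIComponent(title)));
}

console.log(buildWikiLinkHref("Hello World"));                        // "#Hello%20World"
console.log(buildWikiLinkHref("Hello World", "/wiki/$uri_encoded$")); // "/wiki/Hello%20World"
```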
        "$:/core/modules/widgets/linkcatcher.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/linkcatcher.js\ntype: application/javascript\nmodule-type: widget\n\nLinkcatcher widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar LinkCatcherWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-navigate\", handler: \"handleNavigateEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nLinkCatcherWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nLinkCatcherWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nLinkCatcherWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.catchTo = this.getAttribute(\"to\");\n\tthis.catchMessage = this.getAttribute(\"message\");\n\tthis.catchSet = this.getAttribute(\"set\");\n\tthis.catchSetTo = this.getAttribute(\"setTo\");\n\tthis.catchActions = this.getAttribute(\"actions\");\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nLinkCatcherWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedAttributes.message || changedAttributes.set || changedAttributes.setTo) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\n/*\nHandle a tm-navigate event\n*/\nLinkCatcherWidget.prototype.handleNavigateEvent = function(event) {\n\tif(this.catchTo) {\n\t\tthis.wiki.setTextReference(this.catchTo,event.navigateTo,this.getVariable(\"currentTiddler\"));\n\t}\n\tif(this.catchMessage && this.parentWidget) {\n\t\tthis.parentWidget.dispatchEvent({\n\t\t\ttype: this.catchMessage,\n\t\t\tparam: event.navigateTo,\n\t\t\tnavigateTo: event.navigateTo\n\t\t});\n\t}\n\tif(this.catchSet) {\n\t\tvar tiddler = this.wiki.getTiddler(this.catchSet);\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,{title: this.catchSet, text: this.catchSetTo}));\n\t}\n\tif(this.catchActions) {\n\t\tthis.invokeActionString(this.catchActions,this);\n\t}\n\treturn false;\n};\n\nexports.linkcatcher = LinkCatcherWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/linkcatcher.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
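The linkcatcher intercepts `tm-navigate` events from its children and, depending on which attributes are set, writes the target title to a text reference, re-dispatches a custom message, overwrites a state tiddler, and/or runs an action string. A compact sketch of that order of side effects, using a hypothetical config object with callback stubs in place of the wiki and widget APIs:

```
// Sketch: the order of side effects in LinkCatcherWidget.prototype.handleNavigateEvent.
// Each handler is optional; any combination may be configured on the widget.
function catchNavigate(config, navigateTo) {
	if(config.to) {
		config.setTextReference(config.to, navigateTo);             // <$linkcatcher to=...>
	}
	if(config.message) {
		config.dispatch({type: config.message, param: navigateTo}); // <$linkcatcher message=...>
	}
	if(config.set) {
		config.setTiddlerText(config.set, config.setTo);            // <$linkcatcher set=... setTo=...>
	}
	if(config.actions) {
		config.invokeActions(config.actions);                       // <$linkcatcher actions=...>
	}
	return false; // the event stops here rather than reaching an outer navigator
}

// Example with logging stubs
catchNavigate({
	to: "$:/temp/selection",
	setTextReference: function(ref, value) { console.log("set", ref, "=", value); },
	dispatch: function() {}, setTiddlerText: function() {}, invokeActions: function() {}
}, "SomeTiddler");
```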
        "$:/core/modules/widgets/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/list.js\ntype: application/javascript\nmodule-type: widget\n\nList and list item widgets\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\n/*\nThe list widget creates list element sub-widgets that reach back into the list widget for their configuration\n*/\n\nvar ListWidget = function(parseTreeNode,options) {\n\t// Initialise the storyviews if they've not been done already\n\tif(!this.storyViews) {\n\t\tListWidget.prototype.storyViews = {};\n\t\t$tw.modules.applyMethods(\"storyview\",this.storyViews);\n\t}\n\t// Main initialisation inherited from widget.js\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nListWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nListWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n\t// Construct the storyview\n\tvar StoryView = this.storyViews[this.storyViewName];\n\tif(StoryView && !this.document.isTiddlyWikiFakeDom) {\n\t\tthis.storyview = new StoryView(this);\n\t} else {\n\t\tthis.storyview = null;\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nListWidget.prototype.execute = function() {\n\t// Get our attributes\n\tthis.template = this.getAttribute(\"template\");\n\tthis.editTemplate = this.getAttribute(\"editTemplate\");\n\tthis.variableName = this.getAttribute(\"variable\",\"currentTiddler\");\n\tthis.storyViewName = this.getAttribute(\"storyview\");\n\tthis.historyTitle = this.getAttribute(\"history\");\n\t// Compose the list elements\n\tthis.list = this.getTiddlerList();\n\tvar members = [],\n\t\tself = this;\n\t// Check for an empty list\n\tif(this.list.length === 0) {\n\t\tmembers = this.getEmptyMessage();\n\t} else {\n\t\t$tw.utils.each(this.list,function(title,index) {\n\t\t\tmembers.push(self.makeItemTemplate(title));\n\t\t});\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(members);\n\t// Clear the last history\n\tthis.history = [];\n};\n\nListWidget.prototype.getTiddlerList = function() {\n\tvar defaultFilter = \"[!is[system]sort[title]]\";\n\treturn this.wiki.filterTiddlers(this.getAttribute(\"filter\",defaultFilter),this);\n};\n\nListWidget.prototype.getEmptyMessage = function() {\n\tvar emptyMessage = this.getAttribute(\"emptyMessage\",\"\"),\n\t\tparser = this.wiki.parseText(\"text/vnd.tiddlywiki\",emptyMessage,{parseAsInline: true});\n\tif(parser) {\n\t\treturn parser.tree;\n\t} else {\n\t\treturn [];\n\t}\n};\n\n/*\nCompose the template for a list item\n*/\nListWidget.prototype.makeItemTemplate = function(title) {\n\t// Check if the tiddler is a draft\n\tvar tiddler = this.wiki.getTiddler(title),\n\t\tisDraft = tiddler && tiddler.hasField(\"draft.of\"),\n\t\ttemplate = this.template,\n\t\ttemplateTree;\n\tif(isDraft && this.editTemplate) {\n\t\ttemplate = this.editTemplate;\n\t}\n\t// Compose the transclusion of the template\n\tif(template) {\n\t\ttemplateTree = [{type: \"transclude\", attributes: {tiddler: {type: \"string\", value: template}}}];\n\t} else {\n\t\tif(this.parseTreeNode.children && this.parseTreeNode.children.length > 0) {\n\t\t\ttemplateTree = this.parseTreeNode.children;\n\t\t} else {\n\t\t\t// Default template is a link to the title\n\t\t\ttemplateTree = [{type: \"element\", tag: this.parseTreeNode.isBlock ? 
\"div\" : \"span\", children: [{type: \"link\", attributes: {to: {type: \"string\", value: title}}, children: [\n\t\t\t\t\t{type: \"text\", text: title}\n\t\t\t]}]}];\n\t\t}\n\t}\n\t// Return the list item\n\treturn {type: \"listitem\", itemTitle: title, variableName: this.variableName, children: templateTree};\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nListWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\tresult;\n\t// Call the storyview\n\tif(this.storyview && this.storyview.refreshStart) {\n\t\tthis.storyview.refreshStart(changedTiddlers,changedAttributes);\n\t}\n\t// Completely refresh if any of our attributes have changed\n\tif(changedAttributes.filter || changedAttributes.template || changedAttributes.editTemplate || changedAttributes.emptyMessage || changedAttributes.storyview || changedAttributes.history) {\n\t\tthis.refreshSelf();\n\t\tresult = true;\n\t} else {\n\t\t// Handle any changes to the list\n\t\tresult = this.handleListChanges(changedTiddlers);\n\t\t// Handle any changes to the history stack\n\t\tif(this.historyTitle && changedTiddlers[this.historyTitle]) {\n\t\t\tthis.handleHistoryChanges();\n\t\t}\n\t}\n\t// Call the storyview\n\tif(this.storyview && this.storyview.refreshEnd) {\n\t\tthis.storyview.refreshEnd(changedTiddlers,changedAttributes);\n\t}\n\treturn result;\n};\n\n/*\nHandle any changes to the history list\n*/\nListWidget.prototype.handleHistoryChanges = function() {\n\t// Get the history data\n\tvar newHistory = this.wiki.getTiddlerDataCached(this.historyTitle,[]);\n\t// Ignore any entries of the history that match the previous history\n\tvar entry = 0;\n\twhile(entry < newHistory.length && entry < this.history.length && newHistory[entry].title === this.history[entry].title) {\n\t\tentry++;\n\t}\n\t// Navigate forwards to each of the new tiddlers\n\twhile(entry < newHistory.length) {\n\t\tif(this.storyview && this.storyview.navigateTo) {\n\t\t\tthis.storyview.navigateTo(newHistory[entry]);\n\t\t}\n\t\tentry++;\n\t}\n\t// Update the history\n\tthis.history = newHistory;\n};\n\n/*\nProcess any changes to the list\n*/\nListWidget.prototype.handleListChanges = function(changedTiddlers) {\n\t// Get the new list\n\tvar prevList = this.list;\n\tthis.list = this.getTiddlerList();\n\t// Check for an empty list\n\tif(this.list.length === 0) {\n\t\t// Check if it was empty before\n\t\tif(prevList.length === 0) {\n\t\t\t// If so, just refresh the empty message\n\t\t\treturn this.refreshChildren(changedTiddlers);\n\t\t} else {\n\t\t\t// Replace the previous content with the empty message\n\t\t\tfor(t=this.children.length-1; t>=0; t--) {\n\t\t\t\tthis.removeListItem(t);\n\t\t\t}\n\t\t\tvar nextSibling = this.findNextSiblingDomNode();\n\t\t\tthis.makeChildWidgets(this.getEmptyMessage());\n\t\t\tthis.renderChildren(this.parentDomNode,nextSibling);\n\t\t\treturn true;\n\t\t}\n\t} else {\n\t\t// If the list was empty then we need to remove the empty message\n\t\tif(prevList.length === 0) {\n\t\t\tthis.removeChildDomNodes();\n\t\t\tthis.children = [];\n\t\t}\n\t\t// Cycle through the list, inserting and removing list items as needed\n\t\tvar hasRefreshed = false;\n\t\tfor(var t=0; t<this.list.length; t++) {\n\t\t\tvar index = this.findListItem(t,this.list[t]);\n\t\t\tif(index === undefined) {\n\t\t\t\t// The list item must be inserted\n\t\t\t\tthis.insertListItem(t,this.list[t]);\n\t\t\t\thasRefreshed = true;\n\t\t\t} else 
{\n\t\t\t\t// There are intervening list items that must be removed\n\t\t\t\tfor(var n=index-1; n>=t; n--) {\n\t\t\t\t\tthis.removeListItem(n);\n\t\t\t\t\thasRefreshed = true;\n\t\t\t\t}\n\t\t\t\t// Refresh the item we're reusing\n\t\t\t\tvar refreshed = this.children[t].refresh(changedTiddlers);\n\t\t\t\thasRefreshed = hasRefreshed || refreshed;\n\t\t\t}\n\t\t}\n\t\t// Remove any left over items\n\t\tfor(t=this.children.length-1; t>=this.list.length; t--) {\n\t\t\tthis.removeListItem(t);\n\t\t\thasRefreshed = true;\n\t\t}\n\t\treturn hasRefreshed;\n\t}\n};\n\n/*\nFind the list item with a given title, starting from a specified position\n*/\nListWidget.prototype.findListItem = function(startIndex,title) {\n\twhile(startIndex < this.children.length) {\n\t\tif(this.children[startIndex].parseTreeNode.itemTitle === title) {\n\t\t\treturn startIndex;\n\t\t}\n\t\tstartIndex++;\n\t}\n\treturn undefined;\n};\n\n/*\nInsert a new list item at the specified index\n*/\nListWidget.prototype.insertListItem = function(index,title) {\n\t// Create, insert and render the new child widgets\n\tvar widget = this.makeChildWidget(this.makeItemTemplate(title));\n\twidget.parentDomNode = this.parentDomNode; // Hack to enable findNextSiblingDomNode() to work\n\tthis.children.splice(index,0,widget);\n\tvar nextSibling = widget.findNextSiblingDomNode();\n\twidget.render(this.parentDomNode,nextSibling);\n\t// Animate the insertion if required\n\tif(this.storyview && this.storyview.insert) {\n\t\tthis.storyview.insert(widget);\n\t}\n\treturn true;\n};\n\n/*\nRemove the specified list item\n*/\nListWidget.prototype.removeListItem = function(index) {\n\tvar widget = this.children[index];\n\t// Animate the removal if required\n\tif(this.storyview && this.storyview.remove) {\n\t\tthis.storyview.remove(widget);\n\t} else {\n\t\twidget.removeChildDomNodes();\n\t}\n\t// Remove the child widget\n\tthis.children.splice(index,1);\n};\n\nexports.list = ListWidget;\n\nvar ListItemWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nListItemWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nListItemWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nListItemWidget.prototype.execute = function() {\n\t// Set the current list item title\n\tthis.setVariable(this.parseTreeNode.variableName,this.parseTreeNode.itemTitle);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nListItemWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.listitem = ListItemWidget;\n\n})();",
            "title": "$:/core/modules/widgets/list.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
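`handleListChanges` keeps the rendered children in sync with the newly filtered list: for each target position it either reuses a matching child (removing any intervening ones first) or inserts a new one, then trims whatever is left over. The same diff is sketched below on a plain array of titles instead of child widgets; this is an illustration of the loop, not core code.

```
// Sketch: the reuse/insert/remove loop from ListWidget.prototype.handleListChanges.
function syncList(children, newList) {
	var ops = [];
	function findFrom(start, title) {
		for(var i = start; i < children.length; i++) {
			if(children[i] === title) { return i; }
		}
		return undefined;
	}
	for(var t = 0; t < newList.length; t++) {
		var index = findFrom(t, newList[t]);
		if(index === undefined) {
			children.splice(t, 0, newList[t]);          // insert a new item at position t
			ops.push("insert " + newList[t]);
		} else {
			for(var n = index - 1; n >= t; n--) {       // drop items standing in the way
				ops.push("remove " + children[n]);
				children.splice(n, 1);
			}
		}
	}
	for(var r = children.length - 1; r >= newList.length; r--) {  // trim leftovers
		ops.push("remove " + children[r]);
		children.splice(r, 1);
	}
	return ops;
}

console.log(syncList(["a","b","c"], ["b","d","c"])); // ["remove a", "insert d"]
```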
        "$:/core/modules/widgets/macrocall.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/macrocall.js\ntype: application/javascript\nmodule-type: widget\n\nMacrocall widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar MacroCallWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nMacroCallWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nMacroCallWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nMacroCallWidget.prototype.execute = function() {\n\t// Get the parse type if specified\n\tthis.parseType = this.getAttribute(\"$type\",\"text/vnd.tiddlywiki\");\n\tthis.renderOutput = this.getAttribute(\"$output\",\"text/html\");\n\t// Merge together the parameters specified in the parse tree with the specified attributes\n\tvar params = this.parseTreeNode.params ? this.parseTreeNode.params.slice(0) : [];\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tparams.push({name: name, value: attribute});\t\t\t\n\t\t}\n\t});\n\t// Get the macro value\n\tvar text = this.getVariable(this.parseTreeNode.name || this.getAttribute(\"$name\"),{params: params}),\n\t\tparseTreeNodes;\n\t// Are we rendering to HTML?\n\tif(this.renderOutput === \"text/html\") {\n\t\t// If so we'll return the parsed macro\n\t\tvar parser = this.wiki.parseText(this.parseType,text,\n\t\t\t\t\t\t\t{parseAsInline: !this.parseTreeNode.isBlock});\n\t\tparseTreeNodes = parser ? parser.tree : [];\n\t} else {\n\t\t// Otherwise, we'll render the text\n\t\tvar plainText = this.wiki.renderText(\"text/plain\",this.parseType,text,{parentWidget: this});\n\t\tparseTreeNodes = [{type: \"text\", text: plainText}];\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nMacroCallWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif($tw.utils.count(changedAttributes) > 0) {\n\t\t// Rerender ourselves\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.macrocall = MacroCallWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/macrocall.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
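The macrocall widget merges the parameters recorded in the parse tree with any attributes whose names do not start with `$`, then evaluates the macro variable with the combined list. A small standalone sketch of that merge (the helper name is illustrative):

```
// Sketch: parameter merging as in MacroCallWidget.prototype.execute.
// Attributes beginning with "$" ($type, $output, $name) configure the widget
// itself and are not passed to the macro.
function mergeMacroParams(parseTreeParams, attributes) {
	var params = (parseTreeParams || []).slice(0);
	Object.keys(attributes).forEach(function(name) {
		if(name.charAt(0) !== "$") {
			params.push({name: name, value: attributes[name]});
		}
	});
	return params;
}

console.log(mergeMacroParams(
	[{name: "size", value: "large"}],
	{"$type": "text/vnd.tiddlywiki", colour: "red"}
));
// -> [{name: "size", value: "large"}, {name: "colour", value: "red"}]
```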
        "$:/core/modules/widgets/navigator.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/navigator.js\ntype: application/javascript\nmodule-type: widget\n\nNavigator widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar IMPORT_TITLE = \"$:/Import\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar NavigatorWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-navigate\", handler: \"handleNavigateEvent\"},\n\t\t{type: \"tm-edit-tiddler\", handler: \"handleEditTiddlerEvent\"},\n\t\t{type: \"tm-delete-tiddler\", handler: \"handleDeleteTiddlerEvent\"},\n\t\t{type: \"tm-save-tiddler\", handler: \"handleSaveTiddlerEvent\"},\n\t\t{type: \"tm-cancel-tiddler\", handler: \"handleCancelTiddlerEvent\"},\n\t\t{type: \"tm-close-tiddler\", handler: \"handleCloseTiddlerEvent\"},\n\t\t{type: \"tm-close-all-tiddlers\", handler: \"handleCloseAllTiddlersEvent\"},\n\t\t{type: \"tm-close-other-tiddlers\", handler: \"handleCloseOtherTiddlersEvent\"},\n\t\t{type: \"tm-new-tiddler\", handler: \"handleNewTiddlerEvent\"},\n\t\t{type: \"tm-import-tiddlers\", handler: \"handleImportTiddlersEvent\"},\n\t\t{type: \"tm-perform-import\", handler: \"handlePerformImportEvent\"},\n\t\t{type: \"tm-fold-tiddler\", handler: \"handleFoldTiddlerEvent\"},\n\t\t{type: \"tm-fold-other-tiddlers\", handler: \"handleFoldOtherTiddlersEvent\"},\n\t\t{type: \"tm-fold-all-tiddlers\", handler: \"handleFoldAllTiddlersEvent\"},\n\t\t{type: \"tm-unfold-all-tiddlers\", handler: \"handleUnfoldAllTiddlersEvent\"},\n\t\t{type: \"tm-rename-tiddler\", handler: \"handleRenameTiddlerEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nNavigatorWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nNavigatorWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nNavigatorWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.storyTitle = this.getAttribute(\"story\");\n\tthis.historyTitle = this.getAttribute(\"history\");\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nNavigatorWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.story || changedAttributes.history) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nNavigatorWidget.prototype.getStoryList = function() {\n\treturn this.storyTitle ? 
this.wiki.getTiddlerList(this.storyTitle) : null;\n};\n\nNavigatorWidget.prototype.saveStoryList = function(storyList) {\n\tvar storyTiddler = this.wiki.getTiddler(this.storyTitle);\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\t{title: this.storyTitle},\n\t\tstoryTiddler,\n\t\t{list: storyList}\n\t));\n};\n\nNavigatorWidget.prototype.removeTitleFromStory = function(storyList,title) {\n\tvar p = storyList.indexOf(title);\n\twhile(p !== -1) {\n\t\tstoryList.splice(p,1);\n\t\tp = storyList.indexOf(title);\n\t}\n};\n\nNavigatorWidget.prototype.replaceFirstTitleInStory = function(storyList,oldTitle,newTitle) {\n\tvar pos = storyList.indexOf(oldTitle);\n\tif(pos !== -1) {\n\t\tstoryList[pos] = newTitle;\n\t\tdo {\n\t\t\tpos = storyList.indexOf(oldTitle,pos + 1);\n\t\t\tif(pos !== -1) {\n\t\t\t\tstoryList.splice(pos,1);\n\t\t\t}\n\t\t} while(pos !== -1);\n\t} else {\n\t\tstoryList.splice(0,0,newTitle);\n\t}\n};\n\nNavigatorWidget.prototype.addToStory = function(title,fromTitle) {\n\tvar storyList = this.getStoryList();\n\t// Quit if we cannot get hold of the story list\n\tif(!storyList) {\n\t\treturn;\n\t}\n\t// See if the tiddler is already there\n\tvar slot = storyList.indexOf(title);\n\t// Quit if it already exists in the story river\n\tif(slot >= 0) {\n\t\treturn;\n\t}\n\t// First we try to find the position of the story element we navigated from\n\tvar fromIndex = storyList.indexOf(fromTitle);\n\tif(fromIndex >= 0) {\n\t\t// The tiddler is added from inside the river\n\t\t// Determine where to insert the tiddler; Fallback is \"below\"\n\t\tswitch(this.getAttribute(\"openLinkFromInsideRiver\",\"below\")) {\n\t\t\tcase \"top\":\n\t\t\t\tslot = 0;\n\t\t\t\tbreak;\n\t\t\tcase \"bottom\":\n\t\t\t\tslot = storyList.length;\n\t\t\t\tbreak;\n\t\t\tcase \"above\":\n\t\t\t\tslot = fromIndex;\n\t\t\t\tbreak;\n\t\t\tcase \"below\": // Intentional fall-through\n\t\t\tdefault:\n\t\t\t\tslot = fromIndex + 1;\n\t\t\t\tbreak;\n\t\t}\n\t} else {\n\t\t// The tiddler is opened from outside the river. 
Determine where to insert the tiddler; default is \"top\"\n\t\tif(this.getAttribute(\"openLinkFromOutsideRiver\",\"top\") === \"bottom\") {\n\t\t\t// Insert at bottom\n\t\t\tslot = storyList.length;\n\t\t} else {\n\t\t\t// Insert at top\n\t\t\tslot = 0;\n\t\t}\n\t}\n\t// Add the tiddler\n\tstoryList.splice(slot,0,title);\n\t// Save the story\n\tthis.saveStoryList(storyList);\n};\n\n/*\nAdd a new record to the top of the history stack\ntitle: a title string or an array of title strings\nfromPageRect: page coordinates of the origin of the navigation\n*/\nNavigatorWidget.prototype.addToHistory = function(title,fromPageRect) {\n\tthis.wiki.addToHistory(title,fromPageRect,this.historyTitle);\n};\n\n/*\nHandle a tm-navigate event\n*/\nNavigatorWidget.prototype.handleNavigateEvent = function(event) {\n\tif(event.navigateTo) {\n\t\tthis.addToStory(event.navigateTo,event.navigateFromTitle);\n\t\tif(!event.navigateSuppressNavigation) {\n\t\t\tthis.addToHistory(event.navigateTo,event.navigateFromClientRect);\n\t\t}\n\t}\n\treturn false;\n};\n\n// Close a specified tiddler\nNavigatorWidget.prototype.handleCloseTiddlerEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle,\n\t\tstoryList = this.getStoryList();\n\t// Look for tiddlers with this title to close\n\tthis.removeTitleFromStory(storyList,title);\n\tthis.saveStoryList(storyList);\n\treturn false;\n};\n\n// Close all tiddlers\nNavigatorWidget.prototype.handleCloseAllTiddlersEvent = function(event) {\n\tthis.saveStoryList([]);\n\treturn false;\n};\n\n// Close other tiddlers\nNavigatorWidget.prototype.handleCloseOtherTiddlersEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle;\n\tthis.saveStoryList([title]);\n\treturn false;\n};\n\n// Place a tiddler in edit mode\nNavigatorWidget.prototype.handleEditTiddlerEvent = function(event) {\n\tvar self = this;\n\tfunction isUnmodifiedShadow(title) {\n\t\treturn self.wiki.isShadowTiddler(title) && !self.wiki.tiddlerExists(title);\n\t}\n\tfunction confirmEditShadow(title) {\n\t\treturn confirm($tw.language.getString(\n\t\t\t\"ConfirmEditShadowTiddler\",\n\t\t\t{variables:\n\t\t\t\t{title: title}\n\t\t\t}\n\t\t));\n\t}\n\tvar title = event.param || event.tiddlerTitle;\n\tif(isUnmodifiedShadow(title) && !confirmEditShadow(title)) {\n\t\treturn false;\n\t}\n\t// Replace the specified tiddler with a draft in edit mode\n\tvar draftTiddler = this.makeDraftTiddler(title);\n\t// Update the story and history if required\n\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\tvar draftTitle = draftTiddler.fields.title,\n\t\t\tstoryList = this.getStoryList();\n\t\tthis.removeTitleFromStory(storyList,draftTitle);\n\t\tthis.replaceFirstTitleInStory(storyList,title,draftTitle);\n\t\tthis.addToHistory(draftTitle,event.navigateFromClientRect);\n\t\tthis.saveStoryList(storyList);\n\t\treturn false;\n\t}\n};\n\n// Delete a tiddler\nNavigatorWidget.prototype.handleDeleteTiddlerEvent = function(event) {\n\t// Get the tiddler we're deleting\n\tvar title = event.param || event.tiddlerTitle,\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tstoryList = this.getStoryList(),\n\t\toriginalTitle = tiddler ? 
tiddler.fields[\"draft.of\"] : \"\",\n\t\tconfirmationTitle;\n\tif(!tiddler) {\n\t\treturn false;\n\t}\n\t// Check if the tiddler we're deleting is in draft mode\n\tif(originalTitle) {\n\t\t// If so, we'll prompt for confirmation referencing the original tiddler\n\t\tconfirmationTitle = originalTitle;\n\t} else {\n\t\t// If not a draft, then prompt for confirmation referencing the specified tiddler\n\t\tconfirmationTitle = title;\n\t}\n\t// Seek confirmation\n\tif((this.wiki.getTiddler(originalTitle) || (tiddler.fields.text || \"\") !== \"\") && !confirm($tw.language.getString(\n\t\t\t\t\"ConfirmDeleteTiddler\",\n\t\t\t\t{variables:\n\t\t\t\t\t{title: confirmationTitle}\n\t\t\t\t}\n\t\t\t))) {\n\t\treturn false;\n\t}\n\t// Delete the original tiddler\n\tif(originalTitle) {\n\t\tthis.wiki.deleteTiddler(originalTitle);\n\t\tthis.removeTitleFromStory(storyList,originalTitle);\n\t}\n\t// Delete this tiddler\n\tthis.wiki.deleteTiddler(title);\n\t// Remove the closed tiddler from the story\n\tthis.removeTitleFromStory(storyList,title);\n\tthis.saveStoryList(storyList);\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\treturn false;\n};\n\n/*\nCreate/reuse the draft tiddler for a given title\n*/\nNavigatorWidget.prototype.makeDraftTiddler = function(targetTitle) {\n\t// See if there is already a draft tiddler for this tiddler\n\tvar draftTitle = this.wiki.findDraft(targetTitle);\n\tif(draftTitle) {\n\t\treturn this.wiki.getTiddler(draftTitle);\n\t}\n\t// Get the current value of the tiddler we're editing\n\tvar tiddler = this.wiki.getTiddler(targetTitle);\n\t// Save the initial value of the draft tiddler\n\tdraftTitle = this.generateDraftTitle(targetTitle);\n\tvar draftTiddler = new $tw.Tiddler(\n\t\t\ttiddler,\n\t\t\t{\n\t\t\t\ttitle: draftTitle,\n\t\t\t\t\"draft.title\": targetTitle,\n\t\t\t\t\"draft.of\": targetTitle\n\t\t\t},\n\t\t\tthis.wiki.getModificationFields()\n\t\t);\n\tthis.wiki.addTiddler(draftTiddler);\n\treturn draftTiddler;\n};\n\n/*\nGenerate a title for the draft of a given tiddler\n*/\nNavigatorWidget.prototype.generateDraftTitle = function(title) {\n\tvar c = 0,\n\t\tdraftTitle;\n\tdo {\n\t\tdraftTitle = \"Draft \" + (c ? 
(c + 1) + \" \" : \"\") + \"of '\" + title + \"'\";\n\t\tc++;\n\t} while(this.wiki.tiddlerExists(draftTitle));\n\treturn draftTitle;\n};\n\n// Take a tiddler out of edit mode, saving the changes\nNavigatorWidget.prototype.handleSaveTiddlerEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle,\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tstoryList = this.getStoryList();\n\t// Replace the original tiddler with the draft\n\tif(tiddler) {\n\t\tvar draftTitle = (tiddler.fields[\"draft.title\"] || \"\").trim(),\n\t\t\tdraftOf = (tiddler.fields[\"draft.of\"] || \"\").trim();\n\t\tif(draftTitle) {\n\t\t\tvar isRename = draftOf !== draftTitle,\n\t\t\t\tisConfirmed = true;\n\t\t\tif(isRename && this.wiki.tiddlerExists(draftTitle)) {\n\t\t\t\tisConfirmed = confirm($tw.language.getString(\n\t\t\t\t\t\"ConfirmOverwriteTiddler\",\n\t\t\t\t\t{variables:\n\t\t\t\t\t\t{title: draftTitle}\n\t\t\t\t\t}\n\t\t\t\t));\n\t\t\t}\n\t\t\tif(isConfirmed) {\n\t\t\t\t// Create the new tiddler and pass it through the th-saving-tiddler hook\n\t\t\t\tvar newTiddler = new $tw.Tiddler(this.wiki.getCreationFields(),tiddler,{\n\t\t\t\t\ttitle: draftTitle,\n\t\t\t\t\t\"draft.title\": undefined,\n\t\t\t\t\t\"draft.of\": undefined\n\t\t\t\t},this.wiki.getModificationFields());\n\t\t\t\tnewTiddler = $tw.hooks.invokeHook(\"th-saving-tiddler\",newTiddler);\n\t\t\t\tthis.wiki.addTiddler(newTiddler);\n\t\t\t\t// Remove the draft tiddler\n\t\t\t\tthis.wiki.deleteTiddler(title);\n\t\t\t\t// Remove the original tiddler if we're renaming it\n\t\t\t\tif(isRename) {\n\t\t\t\t\tthis.wiki.deleteTiddler(draftOf);\n\t\t\t\t}\n\t\t\t\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\t\t\t\t// Replace the draft in the story with the original\n\t\t\t\t\tthis.replaceFirstTitleInStory(storyList,title,draftTitle);\n\t\t\t\t\tthis.addToHistory(draftTitle,event.navigateFromClientRect);\n\t\t\t\t\tif(draftTitle !== this.storyTitle) {\n\t\t\t\t\t\tthis.saveStoryList(storyList);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Trigger an autosave\n\t\t\t\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\n// Take a tiddler out of edit mode without saving the changes\nNavigatorWidget.prototype.handleCancelTiddlerEvent = function(event) {\n\t// Flip the specified tiddler from draft back to the original\n\tvar draftTitle = event.param || event.tiddlerTitle,\n\t\tdraftTiddler = this.wiki.getTiddler(draftTitle),\n\t\toriginalTitle = draftTiddler && draftTiddler.fields[\"draft.of\"];\n\tif(draftTiddler && originalTitle) {\n\t\t// Ask for confirmation if the tiddler text has changed\n\t\tvar isConfirmed = true,\n\t\t\toriginalTiddler = this.wiki.getTiddler(originalTitle),\n\t\t\tstoryList = this.getStoryList();\n\t\tif(this.wiki.isDraftModified(draftTitle)) {\n\t\t\tisConfirmed = confirm($tw.language.getString(\n\t\t\t\t\"ConfirmCancelTiddler\",\n\t\t\t\t{variables:\n\t\t\t\t\t{title: draftTitle}\n\t\t\t\t}\n\t\t\t));\n\t\t}\n\t\t// Remove the draft tiddler\n\t\tif(isConfirmed) {\n\t\t\tthis.wiki.deleteTiddler(draftTitle);\n\t\t\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\t\t\tif(originalTiddler) {\n\t\t\t\t\tthis.replaceFirstTitleInStory(storyList,draftTitle,originalTitle);\n\t\t\t\t\tthis.addToHistory(originalTitle,event.navigateFromClientRect);\n\t\t\t\t} else {\n\t\t\t\t\tthis.removeTitleFromStory(storyList,draftTitle);\n\t\t\t\t}\n\t\t\t\tthis.saveStoryList(storyList);\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\n// 
Create a new draft tiddler\n// event.param can either be the title of a template tiddler, or a hashmap of fields.\n//\n// The title of the newly created tiddler follows these rules:\n// * If a hashmap was used and a title field was specified, use that title\n// * If a hashmap was used without a title field, use a default title, if necessary making it unique with a numeric suffix\n// * If a template tiddler was used, use the title of the template, if necessary making it unique with a numeric suffix\n//\n// If a draft of the target tiddler already exists then it is reused\nNavigatorWidget.prototype.handleNewTiddlerEvent = function(event) {\n\t// Get the story details\n\tvar storyList = this.getStoryList(),\n\t\ttemplateTiddler, additionalFields, title, draftTitle, existingTiddler;\n\t// Get the template tiddler (if any)\n\tif(typeof event.param === \"string\") {\n\t\t// Get the template tiddler\n\t\ttemplateTiddler = this.wiki.getTiddler(event.param);\n\t\t// Generate a new title\n\t\ttitle = this.wiki.generateNewTitle(event.param || $tw.language.getString(\"DefaultNewTiddlerTitle\"));\n\t}\n\t// Get the specified additional fields\n\tif(typeof event.paramObject === \"object\") {\n\t\tadditionalFields = event.paramObject;\n\t}\n\tif(typeof event.param === \"object\") { // Backwards compatibility with 5.1.3\n\t\tadditionalFields = event.param;\n\t}\n\tif(additionalFields && additionalFields.title) {\n\t\ttitle = additionalFields.title;\n\t}\n\t// Generate a title if we don't have one\n\ttitle = title || this.wiki.generateNewTitle($tw.language.getString(\"DefaultNewTiddlerTitle\"));\n\t// Find any existing draft for this tiddler\n\tdraftTitle = this.wiki.findDraft(title);\n\t// Pull in any existing tiddler\n\tif(draftTitle) {\n\t\texistingTiddler = this.wiki.getTiddler(draftTitle);\n\t} else {\n\t\tdraftTitle = this.generateDraftTitle(title);\n\t\texistingTiddler = this.wiki.getTiddler(title);\n\t}\n\t// Merge the tags\n\tvar mergedTags = [];\n\tif(existingTiddler && existingTiddler.fields.tags) {\n\t\t$tw.utils.pushTop(mergedTags,existingTiddler.fields.tags)\n\t}\n\tif(additionalFields && additionalFields.tags) {\n\t\t// Merge tags\n\t\tmergedTags = $tw.utils.pushTop(mergedTags,$tw.utils.parseStringArray(additionalFields.tags));\n\t}\n\tif(templateTiddler && templateTiddler.fields.tags) {\n\t\t// Merge tags\n\t\tmergedTags = $tw.utils.pushTop(mergedTags,templateTiddler.fields.tags);\n\t}\n\t// Save the draft tiddler\n\tvar draftTiddler = new $tw.Tiddler({\n\t\t\ttext: \"\",\n\t\t\t\"draft.title\": title\n\t\t},\n\t\ttemplateTiddler,\n\t\texistingTiddler,\n\t\tadditionalFields,\n\t\tthis.wiki.getCreationFields(),\n\t\t{\n\t\t\ttitle: draftTitle,\n\t\t\t\"draft.of\": title,\n\t\t\ttags: mergedTags\n\t\t},this.wiki.getModificationFields());\n\tthis.wiki.addTiddler(draftTiddler);\n\t// Update the story to insert the new draft at the top and remove any existing tiddler\n\tif(storyList.indexOf(draftTitle) === -1) {\n\t\tvar slot = storyList.indexOf(event.navigateFromTitle);\n\t\tstoryList.splice(slot + 1,0,draftTitle);\n\t}\n\tif(storyList.indexOf(title) !== -1) {\n\t\tstoryList.splice(storyList.indexOf(title),1);\t\t\n\t}\n\tthis.saveStoryList(storyList);\n\t// Add a new record to the top of the history stack\n\tthis.addToHistory(draftTitle);\n\treturn false;\n};\n\n// Import JSON tiddlers into a pending import tiddler\nNavigatorWidget.prototype.handleImportTiddlersEvent = function(event) {\n\tvar self = this;\n\t// Get the tiddlers\n\tvar tiddlers = [];\n\ttry {\n\t\ttiddlers = 
JSON.parse(event.param);\t\n\t} catch(e) {\n\t}\n\t// Get the current $:/Import tiddler\n\tvar importTiddler = this.wiki.getTiddler(IMPORT_TITLE),\n\t\timportData = this.wiki.getTiddlerData(IMPORT_TITLE,{}),\n\t\tnewFields = new Object({\n\t\t\ttitle: IMPORT_TITLE,\n\t\t\ttype: \"application/json\",\n\t\t\t\"plugin-type\": \"import\",\n\t\t\t\"status\": \"pending\"\n\t\t}),\n\t\tincomingTiddlers = [];\n\t// Process each tiddler\n\timportData.tiddlers = importData.tiddlers || {};\n\t$tw.utils.each(tiddlers,function(tiddlerFields) {\n\t\tvar title = tiddlerFields.title;\n\t\tif(title) {\n\t\t\tincomingTiddlers.push(title);\n\t\t\timportData.tiddlers[title] = tiddlerFields;\n\t\t}\n\t});\n\t// Give the active upgrader modules a chance to process the incoming tiddlers\n\tvar messages = this.wiki.invokeUpgraders(incomingTiddlers,importData.tiddlers);\n\t$tw.utils.each(messages,function(message,title) {\n\t\tnewFields[\"message-\" + title] = message;\n\t});\n\t// Deselect any suppressed tiddlers\n\t$tw.utils.each(importData.tiddlers,function(tiddler,title) {\n\t\tif($tw.utils.count(tiddler) === 0) {\n\t\t\tnewFields[\"selection-\" + title] = \"unchecked\";\n\t\t}\n\t});\n\t// Save the $:/Import tiddler\n\tnewFields.text = JSON.stringify(importData,null,$tw.config.preferences.jsonSpaces);\n\tthis.wiki.addTiddler(new $tw.Tiddler(importTiddler,newFields));\n\t// Update the story and history details\n\tif(this.getVariable(\"tv-auto-open-on-import\") !== \"no\") {\n\t\tvar storyList = this.getStoryList(),\n\t\t\thistory = [];\n\t\t// Add it to the story\n\t\tif(storyList.indexOf(IMPORT_TITLE) === -1) {\n\t\t\tstoryList.unshift(IMPORT_TITLE);\n\t\t}\n\t\t// And to history\n\t\thistory.push(IMPORT_TITLE);\n\t\t// Save the updated story and history\n\t\tthis.saveStoryList(storyList);\n\t\tthis.addToHistory(history);\t\t\n\t}\n\treturn false;\n};\n\n// \nNavigatorWidget.prototype.handlePerformImportEvent = function(event) {\n\tvar self = this,\n\t\timportTiddler = this.wiki.getTiddler(event.param),\n\t\timportData = this.wiki.getTiddlerDataCached(event.param,{tiddlers: {}}),\n\t\timportReport = [];\n\t// Add the tiddlers to the store\n\timportReport.push($tw.language.getString(\"Import/Imported/Hint\") + \"\\n\");\n\t$tw.utils.each(importData.tiddlers,function(tiddlerFields) {\n\t\tvar title = tiddlerFields.title;\n\t\tif(title && importTiddler && importTiddler.fields[\"selection-\" + title] !== \"unchecked\") {\n\t\t\tself.wiki.addTiddler(new $tw.Tiddler(tiddlerFields));\n\t\t\timportReport.push(\"# [[\" + tiddlerFields.title + \"]]\");\n\t\t}\n\t});\n\t// Replace the $:/Import tiddler with an import report\n\tthis.wiki.addTiddler(new $tw.Tiddler({\n\t\ttitle: event.param,\n\t\ttext: importReport.join(\"\\n\"),\n\t\t\"status\": \"complete\"\n\t}));\n\t// Navigate to the $:/Import tiddler\n\tthis.addToHistory([event.param]);\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n};\n\nNavigatorWidget.prototype.handleFoldTiddlerEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {};\n\tif(paramObject.foldedState) {\n\t\tvar foldedState = this.wiki.getTiddlerText(paramObject.foldedState,\"show\") === \"show\" ? 
\"hide\" : \"show\";\n\t\tthis.wiki.setText(paramObject.foldedState,\"text\",null,foldedState);\n\t}\n};\n\nNavigatorWidget.prototype.handleFoldOtherTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,event.param === title ? \"show\" : \"hide\");\n\t});\n};\n\nNavigatorWidget.prototype.handleFoldAllTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,\"hide\");\n\t});\n};\n\nNavigatorWidget.prototype.handleUnfoldAllTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,\"show\");\n\t});\n};\n\nNavigatorWidget.prototype.handleRenameTiddlerEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tfrom = paramObject.from || event.tiddlerTitle,\n\t\tto = paramObject.to;\n\t$tw.wiki.renameTiddler(from,to);\n};\n\nexports.navigator = NavigatorWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/navigator.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
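One self-contained piece of the navigator worth isolating is how `generateDraftTitle` produces a unique title for a draft ("Draft of 'X'", then "Draft 2 of 'X'", and so on). The sketch below mirrors that loop with an `exists` predicate standing in for `wiki.tiddlerExists`; the predicate and sample data are illustrative only.

```
// Sketch: unique draft titles as produced by NavigatorWidget.prototype.generateDraftTitle.
function generateDraftTitle(title, exists) {
	var c = 0, draftTitle;
	do {
		draftTitle = "Draft " + (c ? (c + 1) + " " : "") + "of '" + title + "'";
		c++;
	} while(exists(draftTitle));
	return draftTitle;
}

// Example: the first two candidate titles are already taken
var taken = {"Draft of 'HelloThere'": true, "Draft 2 of 'HelloThere'": true};
console.log(generateDraftTitle("HelloThere", function(t) { return !!taken[t]; }));
// -> "Draft 3 of 'HelloThere'"
```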
        "$:/core/modules/widgets/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/password.js\ntype: application/javascript\nmodule-type: widget\n\nPassword widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar PasswordWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nPasswordWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nPasswordWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Get the current password\n\tvar password = $tw.browser ? $tw.utils.getPassword(this.passwordName) || \"\" : \"\";\n\t// Create our element\n\tvar domNode = this.document.createElement(\"input\");\n\tdomNode.setAttribute(\"type\",\"password\");\n\tdomNode.setAttribute(\"value\",password);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nPasswordWidget.prototype.handleChangeEvent = function(event) {\n\tvar password = this.domNodes[0].value;\n\treturn $tw.utils.savePassword(this.passwordName,password);\n};\n\n/*\nCompute the internal state of the widget\n*/\nPasswordWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.passwordName = this.getAttribute(\"name\",\"\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nPasswordWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.name) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.password = PasswordWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/password.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/radio.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/radio.js\ntype: application/javascript\nmodule-type: widget\n\nRadio widget\n\nWill set a field to the selected value:\n\n```\n\t<$radio field=\"myfield\" value=\"check 1\">one</$radio>\n\t<$radio field=\"myfield\" value=\"check 2\">two</$radio>\n\t<$radio field=\"myfield\" value=\"check 3\">three</$radio>\n```\n\n|Parameter |Description |h\n|tiddler |Name of the tiddler in which the field should be set. Defaults to current tiddler |\n|field |The name of the field to be set |\n|value |The value to set |\n|class |Optional class name(s) |\n\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RadioWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRadioWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRadioWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create our elements\n\tthis.labelDomNode = this.document.createElement(\"label\");\n\tthis.labelDomNode.setAttribute(\"class\",this.radioClass);\n\tthis.inputDomNode = this.document.createElement(\"input\");\n\tthis.inputDomNode.setAttribute(\"type\",\"radio\");\n\tif(this.getValue() == this.radioValue) {\n\t\tthis.inputDomNode.setAttribute(\"checked\",\"true\");\n\t}\n\tthis.labelDomNode.appendChild(this.inputDomNode);\n\tthis.spanDomNode = this.document.createElement(\"span\");\n\tthis.labelDomNode.appendChild(this.spanDomNode);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(this.inputDomNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(this.labelDomNode,nextSibling);\n\tthis.renderChildren(this.spanDomNode,null);\n\tthis.domNodes.push(this.labelDomNode);\n};\n\nRadioWidget.prototype.getValue = function() {\n\tvar tiddler = this.wiki.getTiddler(this.radioTitle);\n\treturn tiddler && tiddler.getFieldString(this.radioField);\n};\n\nRadioWidget.prototype.setValue = function() {\n\tif(this.radioField) {\n\t\tvar tiddler = this.wiki.getTiddler(this.radioTitle),\n\t\t\taddition = {};\n\t\taddition[this.radioField] = this.radioValue;\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getCreationFields(),{title: this.radioTitle},tiddler,addition,this.wiki.getModificationFields()));\n\t}\n};\n\nRadioWidget.prototype.handleChangeEvent = function(event) {\n\tif(this.inputDomNode.checked) {\n\t\tthis.setValue();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nRadioWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.radioTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.radioField = this.getAttribute(\"field\",\"text\");\n\tthis.radioValue = this.getAttribute(\"value\");\n\tthis.radioClass = this.getAttribute(\"class\",\"\");\n\tif(this.radioClass !== \"\") {\n\t\tthis.radioClass += \" \";\n\t}\n\tthis.radioClass += \"tc-radio\";\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nRadioWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.value || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false;\n\t\tif(changedTiddlers[this.radioTitle]) {\n\t\t\tthis.inputDomNode.checked = this.getValue() === this.radioValue;\n\t\t\trefreshed = true;\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\nexports.radio = RadioWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/radio.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/raw.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/raw.js\ntype: application/javascript\nmodule-type: widget\n\nRaw widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RawWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRawWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRawWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tvar div = this.document.createElement(\"div\");\n\tdiv.innerHTML=this.parseTreeNode.html;\n\tparent.insertBefore(div,nextSibling);\n\tthis.domNodes.push(div);\t\n};\n\n/*\nCompute the internal state of the widget\n*/\nRawWidget.prototype.execute = function() {\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nRawWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.raw = RawWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/raw.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/reveal.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/reveal.js\ntype: application/javascript\nmodule-type: widget\n\nReveal widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RevealWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRevealWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRevealWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar tag = this.parseTreeNode.isBlock ? \"div\" : \"span\";\n\tif(this.revealTag && $tw.config.htmlUnsafeElements.indexOf(this.revealTag) === -1) {\n\t\ttag = this.revealTag;\n\t}\n\tvar domNode = this.document.createElement(tag);\n\tvar classes = this[\"class\"].split(\" \") || [];\n\tclasses.push(\"tc-reveal\");\n\tdomNode.className = classes.join(\" \");\n\tif(this.style) {\n\t\tdomNode.setAttribute(\"style\",this.style);\n\t}\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tif(!domNode.isTiddlyWikiFakeDom && this.type === \"popup\" && this.isOpen) {\n\t\tthis.positionPopup(domNode);\n\t\t$tw.utils.addClass(domNode,\"tc-popup\"); // Make sure that clicks don't dismiss popups within the revealed content\n\t}\n\tif(!this.isOpen) {\n\t\tdomNode.setAttribute(\"hidden\",\"true\");\n\t}\n\tthis.domNodes.push(domNode);\n};\n\nRevealWidget.prototype.positionPopup = function(domNode) {\n\tdomNode.style.position = \"absolute\";\n\tdomNode.style.zIndex = \"1000\";\n\tswitch(this.position) {\n\t\tcase \"left\":\n\t\t\tdomNode.style.left = (this.popup.left - domNode.offsetWidth) + \"px\";\n\t\t\tdomNode.style.top = this.popup.top + \"px\";\n\t\t\tbreak;\n\t\tcase \"above\":\n\t\t\tdomNode.style.left = this.popup.left + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top - domNode.offsetHeight) + \"px\";\n\t\t\tbreak;\n\t\tcase \"aboveright\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width) + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height - domNode.offsetHeight) + \"px\";\n\t\t\tbreak;\n\t\tcase \"right\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width) + \"px\";\n\t\t\tdomNode.style.top = this.popup.top + \"px\";\n\t\t\tbreak;\n\t\tcase \"belowleft\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width - domNode.offsetWidth) + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height) + \"px\";\n\t\t\tbreak;\n\t\tdefault: // Below\n\t\t\tdomNode.style.left = this.popup.left + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height) + \"px\";\n\t\t\tbreak;\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nRevealWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.state = this.getAttribute(\"state\");\n\tthis.revealTag = this.getAttribute(\"tag\");\n\tthis.type = this.getAttribute(\"type\");\n\tthis.text = this.getAttribute(\"text\");\n\tthis.position = this.getAttribute(\"position\");\n\tthis[\"class\"] = this.getAttribute(\"class\",\"\");\n\tthis.style = this.getAttribute(\"style\",\"\");\n\tthis[\"default\"] = this.getAttribute(\"default\",\"\");\n\tthis.animate = this.getAttribute(\"animate\",\"no\");\n\tthis.retain = this.getAttribute(\"retain\",\"no\");\n\tthis.openAnimation = this.animate === \"no\" ? 
undefined : \"open\";\n\tthis.closeAnimation = this.animate === \"no\" ? undefined : \"close\";\n\t// Compute the title of the state tiddler and read it\n\tthis.stateTitle = this.state;\n\tthis.readState();\n\t// Construct the child widgets\n\tvar childNodes = this.isOpen ? this.parseTreeNode.children : [];\n\tthis.hasChildNodes = this.isOpen;\n\tthis.makeChildWidgets(childNodes);\n};\n\n/*\nRead the state tiddler\n*/\nRevealWidget.prototype.readState = function() {\n\t// Read the information from the state tiddler\n\tvar state = this.stateTitle ? this.wiki.getTextReference(this.stateTitle,this[\"default\"],this.getVariable(\"currentTiddler\")) : this[\"default\"];\n\tswitch(this.type) {\n\t\tcase \"popup\":\n\t\t\tthis.readPopupState(state);\n\t\t\tbreak;\n\t\tcase \"match\":\n\t\t\tthis.readMatchState(state);\n\t\t\tbreak;\n\t\tcase \"nomatch\":\n\t\t\tthis.readMatchState(state);\n\t\t\tthis.isOpen = !this.isOpen;\n\t\t\tbreak;\n\t}\n};\n\nRevealWidget.prototype.readMatchState = function(state) {\n\tthis.isOpen = state === this.text;\n};\n\nRevealWidget.prototype.readPopupState = function(state) {\n\tvar popupLocationRegExp = /^\\((-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+)\\)$/,\n\t\tmatch = popupLocationRegExp.exec(state);\n\t// Check if the state matches the location regexp\n\tif(match) {\n\t\t// If so, we're open\n\t\tthis.isOpen = true;\n\t\t// Get the location\n\t\tthis.popup = {\n\t\t\tleft: parseFloat(match[1]),\n\t\t\ttop: parseFloat(match[2]),\n\t\t\twidth: parseFloat(match[3]),\n\t\t\theight: parseFloat(match[4])\n\t\t};\n\t} else {\n\t\t// If not, we're closed\n\t\tthis.isOpen = false;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nRevealWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.state || changedAttributes.type || changedAttributes.text || changedAttributes.position || changedAttributes[\"default\"] || changedAttributes.animate) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false,\n\t\t\tcurrentlyOpen = this.isOpen;\n\t\tthis.readState();\n\t\tif(this.isOpen !== currentlyOpen) {\n\t\t\tif(this.retain === \"yes\") {\n\t\t\t\tthis.updateState();\n\t\t\t} else {\n\t\t\t\tthis.refreshSelf();\n\t\t\t\trefreshed = true;\n\t\t\t}\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\n/*\nCalled by refresh() to dynamically show or hide the content\n*/\nRevealWidget.prototype.updateState = function() {\n\t// Read the current state\n\tthis.readState();\n\t// Construct the child nodes if needed\n\tvar domNode = this.domNodes[0];\n\tif(this.isOpen && !this.hasChildNodes) {\n\t\tthis.hasChildNodes = true;\n\t\tthis.makeChildWidgets(this.parseTreeNode.children);\n\t\tthis.renderChildren(domNode,null);\n\t}\n\t// Animate our DOM node\n\tif(!domNode.isTiddlyWikiFakeDom && this.type === \"popup\" && this.isOpen) {\n\t\tthis.positionPopup(domNode);\n\t\t$tw.utils.addClass(domNode,\"tc-popup\"); // Make sure that clicks don't dismiss popups within the revealed content\n\n\t}\n\tif(this.isOpen) {\n\t\tdomNode.removeAttribute(\"hidden\");\n        $tw.anim.perform(this.openAnimation,domNode);\n\t} else {\n\t\t$tw.anim.perform(this.closeAnimation,domNode,{callback: function() {\n\t\t\tdomNode.setAttribute(\"hidden\",\"true\");\n        }});\n\t}\n};\n\nexports.reveal = RevealWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/reveal.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/scrollable.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/scrollable.js\ntype: application/javascript\nmodule-type: widget\n\nScrollable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ScrollableWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.scaleFactor = 1;\n\tthis.addEventListeners([\n\t\t{type: \"tm-scroll\", handler: \"handleScrollEvent\"}\n\t]);\n\tif($tw.browser) {\n\t\tthis.requestAnimationFrame = window.requestAnimationFrame ||\n\t\t\twindow.webkitRequestAnimationFrame ||\n\t\t\twindow.mozRequestAnimationFrame ||\n\t\t\tfunction(callback) {\n\t\t\t\treturn window.setTimeout(callback, 1000/60);\n\t\t\t};\n\t\tthis.cancelAnimationFrame = window.cancelAnimationFrame ||\n\t\t\twindow.webkitCancelAnimationFrame ||\n\t\t\twindow.webkitCancelRequestAnimationFrame ||\n\t\t\twindow.mozCancelAnimationFrame ||\n\t\t\twindow.mozCancelRequestAnimationFrame ||\n\t\t\tfunction(id) {\n\t\t\t\twindow.clearTimeout(id);\n\t\t\t};\n\t}\n};\n\n/*\nInherit from the base widget class\n*/\nScrollableWidget.prototype = new Widget();\n\nScrollableWidget.prototype.cancelScroll = function() {\n\tif(this.idRequestFrame) {\n\t\tthis.cancelAnimationFrame.call(window,this.idRequestFrame);\n\t\tthis.idRequestFrame = null;\n\t}\n};\n\n/*\nHandle a scroll event\n*/\nScrollableWidget.prototype.handleScrollEvent = function(event) {\n\t// Pass the scroll event through if our offsetsize is larger than our scrollsize\n\tif(this.outerDomNode.scrollWidth <= this.outerDomNode.offsetWidth && this.outerDomNode.scrollHeight <= this.outerDomNode.offsetHeight && this.fallthrough === \"yes\") {\n\t\treturn true;\n\t}\n\tthis.scrollIntoView(event.target);\n\treturn false; // Handled event\n};\n\n/*\nScroll an element into view\n*/\nScrollableWidget.prototype.scrollIntoView = function(element) {\n\tvar duration = $tw.utils.getAnimationDuration();\n\tthis.cancelScroll();\n\tthis.startTime = Date.now();\n\tvar scrollPosition = {\n\t\tx: this.outerDomNode.scrollLeft,\n\t\ty: this.outerDomNode.scrollTop\n\t};\n\t// Get the client bounds of the element and adjust by the scroll position\n\tvar scrollableBounds = this.outerDomNode.getBoundingClientRect(),\n\t\tclientTargetBounds = element.getBoundingClientRect(),\n\t\tbounds = {\n\t\t\tleft: clientTargetBounds.left + scrollPosition.x - scrollableBounds.left,\n\t\t\ttop: clientTargetBounds.top + scrollPosition.y - scrollableBounds.top,\n\t\t\twidth: clientTargetBounds.width,\n\t\t\theight: clientTargetBounds.height\n\t\t};\n\t// We'll consider the horizontal and vertical scroll directions separately via this function\n\tvar getEndPos = function(targetPos,targetSize,currentPos,currentSize) {\n\t\t\t// If the target is already visible then stay where we are\n\t\t\tif(targetPos >= currentPos && (targetPos + targetSize) <= (currentPos + currentSize)) {\n\t\t\t\treturn currentPos;\n\t\t\t// If the target is above/left of the current view, then scroll to its top/left\n\t\t\t} else if(targetPos <= currentPos) {\n\t\t\t\treturn targetPos;\n\t\t\t// If the target is smaller than the window and the scroll position is too far up, then scroll till the target is at the bottom of the window\n\t\t\t} else if(targetSize < currentSize && currentPos < (targetPos + targetSize - currentSize)) {\n\t\t\t\treturn targetPos + targetSize - currentSize;\n\t\t\t// If the target is big, then just scroll to the top\n\t\t\t} else 
if(currentPos < targetPos) {\n\t\t\t\treturn targetPos;\n\t\t\t// Otherwise, stay where we are\n\t\t\t} else {\n\t\t\t\treturn currentPos;\n\t\t\t}\n\t\t},\n\t\tendX = getEndPos(bounds.left,bounds.width,scrollPosition.x,this.outerDomNode.offsetWidth),\n\t\tendY = getEndPos(bounds.top,bounds.height,scrollPosition.y,this.outerDomNode.offsetHeight);\n\t// Only scroll if necessary\n\tif(endX !== scrollPosition.x || endY !== scrollPosition.y) {\n\t\tvar self = this,\n\t\t\tdrawFrame;\n\t\tdrawFrame = function () {\n\t\t\tvar t;\n\t\t\tif(duration <= 0) {\n\t\t\t\tt = 1;\n\t\t\t} else {\n\t\t\t\tt = ((Date.now()) - self.startTime) / duration;\t\n\t\t\t}\n\t\t\tif(t >= 1) {\n\t\t\t\tself.cancelScroll();\n\t\t\t\tt = 1;\n\t\t\t}\n\t\t\tt = $tw.utils.slowInSlowOut(t);\n\t\t\tself.outerDomNode.scrollLeft = scrollPosition.x + (endX - scrollPosition.x) * t;\n\t\t\tself.outerDomNode.scrollTop = scrollPosition.y + (endY - scrollPosition.y) * t;\n\t\t\tif(t < 1) {\n\t\t\t\tself.idRequestFrame = self.requestAnimationFrame.call(window,drawFrame);\n\t\t\t}\n\t\t};\n\t\tdrawFrame();\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nScrollableWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create elements\n\tthis.outerDomNode = this.document.createElement(\"div\");\n\t$tw.utils.setStyle(this.outerDomNode,[\n\t\t{overflowY: \"auto\"},\n\t\t{overflowX: \"auto\"},\n\t\t{webkitOverflowScrolling: \"touch\"}\n\t]);\n\tthis.innerDomNode = this.document.createElement(\"div\");\n\tthis.outerDomNode.appendChild(this.innerDomNode);\n\t// Assign classes\n\tthis.outerDomNode.className = this[\"class\"] || \"\";\n\t// Insert element\n\tparent.insertBefore(this.outerDomNode,nextSibling);\n\tthis.renderChildren(this.innerDomNode,null);\n\tthis.domNodes.push(this.outerDomNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nScrollableWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.fallthrough = this.getAttribute(\"fallthrough\",\"yes\");\n\tthis[\"class\"] = this.getAttribute(\"class\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nScrollableWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.scrollable = ScrollableWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/scrollable.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/select.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/select.js\ntype: application/javascript\nmodule-type: widget\n\nSelect widget:\n\n```\n<$select tiddler=\"MyTiddler\" field=\"text\">\n<$list filter=\"[tag[chapter]]\">\n<option value=<<currentTiddler>>>\n<$view field=\"description\"/>\n</option>\n</$list>\n</$select>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SelectWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSelectWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSelectWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n\tthis.setSelectValue();\n\t$tw.utils.addEventListeners(this.getSelectDomNode(),[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n};\n\n/*\nHandle a change event\n*/\nSelectWidget.prototype.handleChangeEvent = function(event) {\n\t// Get the new value and assign it to the tiddler\n\tif(this.selectMultiple == false) {\n\t\tvar value = this.getSelectDomNode().value;\n\t} else {\n\t\tvar value = this.getSelectValues()\n\t\t\t\tvalue = $tw.utils.stringifyList(value);\n\t}\n\tthis.wiki.setText(this.selectTitle,this.selectField,this.selectIndex,value);\n\t// Trigger actions\n\tif(this.selectActions) {\n\t\tthis.invokeActionString(this.selectActions,this,event);\n\t}\n};\n\n/*\nIf necessary, set the value of the select element to the current value\n*/\nSelectWidget.prototype.setSelectValue = function() {\n\tvar value = this.selectDefault;\n\t// Get the value\n\tif(this.selectIndex) {\n\t\tvalue = this.wiki.extractTiddlerDataItem(this.selectTitle,this.selectIndex);\n\t} else {\n\t\tvar tiddler = this.wiki.getTiddler(this.selectTitle);\n\t\tif(tiddler) {\n\t\t\tif(this.selectField === \"text\") {\n\t\t\t\t// Calling getTiddlerText() triggers lazy loading of skinny tiddlers\n\t\t\t\tvalue = this.wiki.getTiddlerText(this.selectTitle);\n\t\t\t} else {\n\t\t\t\tif($tw.utils.hop(tiddler.fields,this.selectField)) {\n\t\t\t\t\tvalue = tiddler.getFieldString(this.selectField);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif(this.selectField === \"title\") {\n\t\t\t\tvalue = this.selectTitle;\n\t\t\t}\n\t\t}\n\t}\n\t// Assign it to the select element if it's different than the current value\n\tif (this.selectMultiple) {\n\t\tvalue = value === undefined ? \"\" : value;\n\t\tvar select = this.getSelectDomNode();\n\t\tvar values = Array.isArray(value) ? 
value : $tw.utils.parseStringArray(value);\n\t\tfor(var i=0; i < select.children.length; i++){\n\t\t\tif(values.indexOf(select.children[i].value) != -1) {\n\t\t\t\tselect.children[i].selected = true;\n\t\t\t}\n\t\t}\n\t\t\n\t} else {\n\t\tvar domNode = this.getSelectDomNode();\n\t\tif(domNode.value !== value) {\n\t\t\tdomNode.value = value;\n\t\t}\n\t}\n};\n\n/*\nGet the DOM node of the select element\n*/\nSelectWidget.prototype.getSelectDomNode = function() {\n\treturn this.children[0].domNodes[0];\n};\n\n// Return an array of the selected opion values\n// select is an HTML select element\nSelectWidget.prototype.getSelectValues = function() {\n\tvar select, result, options, opt;\n\tselect = this.getSelectDomNode();\n\tresult = [];\n\toptions = select && select.options;\n\tfor (var i=0; i<options.length; i++) {\n\t\topt = options[i];\n\t\tif (opt.selected) {\n\t\t\tresult.push(opt.value || opt.text);\n\t\t}\n\t}\n\treturn result;\n}\n\n/*\nCompute the internal state of the widget\n*/\nSelectWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.selectActions = this.getAttribute(\"actions\");\n\tthis.selectTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.selectField = this.getAttribute(\"field\",\"text\");\n\tthis.selectIndex = this.getAttribute(\"index\");\n\tthis.selectClass = this.getAttribute(\"class\");\n\tthis.selectDefault = this.getAttribute(\"default\");\n\tthis.selectMultiple = this.getAttribute(\"multiple\", false);\n\tthis.selectSize = this.getAttribute(\"size\");\n\t// Make the child widgets\n\tvar selectNode = {\n\t\ttype: \"element\",\n\t\ttag: \"select\",\n\t\tchildren: this.parseTreeNode.children\n\t};\n\tif(this.selectClass) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"class\",this.selectClass);\n\t}\n\tif(this.selectMultiple) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"multiple\",\"multiple\");\n\t}\n\tif(this.selectSize) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"size\",this.selectSize);\n\t}\n\tthis.makeChildWidgets([selectNode]);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nSelectWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// If we're using a different tiddler/field/index then completely refresh ourselves\n\tif(changedAttributes.selectTitle || changedAttributes.selectField || changedAttributes.selectIndex) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t// If the target tiddler value has changed, just update setting and refresh the children\n\t} else {\n\t\tvar childrenRefreshed = this.refreshChildren(changedTiddlers);\n\t\tif(changedTiddlers[this.selectTitle] || childrenRefreshed) {\n\t\t\tthis.setSelectValue();\n\t\t} \n\t\treturn childrenRefreshed;\n\t}\n};\n\nexports.select = SelectWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/select.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/set.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/set.js\ntype: application/javascript\nmodule-type: widget\n\nSet variable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SetWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSetWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSetWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nSetWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.setName = this.getAttribute(\"name\",\"currentTiddler\");\n\tthis.setFilter = this.getAttribute(\"filter\");\n\tthis.setValue = this.getAttribute(\"value\");\n\tthis.setEmptyValue = this.getAttribute(\"emptyValue\");\n\t// Set context variable\n\tthis.setVariable(this.setName,this.getValue(),this.parseTreeNode.params);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nGet the value to be assigned\n*/\nSetWidget.prototype.getValue = function() {\n\tvar value = this.setValue;\n\tif(this.setFilter) {\n\t\tvar results = this.wiki.filterTiddlers(this.setFilter,this);\n\t\tif(!this.setValue) {\n\t\t\tvalue = $tw.utils.stringifyList(results);\n\t\t}\n\t\tif(results.length === 0 && this.setEmptyValue !== undefined) {\n\t\t\tvalue = this.setEmptyValue;\n\t\t}\n\t} else if(!value && this.setEmptyValue) {\n\t\tvalue = this.setEmptyValue;\n\t}\n\treturn value;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nSetWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.name || changedAttributes.filter || changedAttributes.value || changedAttributes.emptyValue ||\n\t   (this.setFilter && this.getValue() != this.variables[this.setName].value)) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.setvariable = SetWidget;\nexports.set = SetWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/set.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/text.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/text.js\ntype: application/javascript\nmodule-type: widget\n\nText node widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TextNodeWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTextNodeWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTextNodeWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar text = this.getAttribute(\"text\",this.parseTreeNode.text || \"\");\n\ttext = text.replace(/\\r/mg,\"\");\n\tvar textNode = this.document.createTextNode(text);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTextNodeWidget.prototype.execute = function() {\n\t// Nothing to do for a text node\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nTextNodeWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.text) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.text = TextNodeWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/text.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/tiddler.js\ntype: application/javascript\nmodule-type: widget\n\nTiddler widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TiddlerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTiddlerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTiddlerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTiddlerWidget.prototype.execute = function() {\n\tthis.tiddlerState = this.computeTiddlerState();\n\tthis.setVariable(\"currentTiddler\",this.tiddlerState.currentTiddler);\n\tthis.setVariable(\"missingTiddlerClass\",this.tiddlerState.missingTiddlerClass);\n\tthis.setVariable(\"shadowTiddlerClass\",this.tiddlerState.shadowTiddlerClass);\n\tthis.setVariable(\"systemTiddlerClass\",this.tiddlerState.systemTiddlerClass);\n\tthis.setVariable(\"tiddlerTagClasses\",this.tiddlerState.tiddlerTagClasses);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nCompute the tiddler state flags\n*/\nTiddlerWidget.prototype.computeTiddlerState = function() {\n\t// Get our parameters\n\tthis.tiddlerTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Compute the state\n\tvar state = {\n\t\tcurrentTiddler: this.tiddlerTitle || \"\",\n\t\tmissingTiddlerClass: (this.wiki.tiddlerExists(this.tiddlerTitle) || this.wiki.isShadowTiddler(this.tiddlerTitle)) ? \"tc-tiddler-exists\" : \"tc-tiddler-missing\",\n\t\tshadowTiddlerClass: this.wiki.isShadowTiddler(this.tiddlerTitle) ? \"tc-tiddler-shadow\" : \"\",\n\t\tsystemTiddlerClass: this.wiki.isSystemTiddler(this.tiddlerTitle) ? \"tc-tiddler-system\" : \"\",\n\t\ttiddlerTagClasses: this.getTagClasses()\n\t};\n\t// Compute a simple hash to make it easier to detect changes\n\tstate.hash = state.currentTiddler + state.missingTiddlerClass + state.shadowTiddlerClass + state.systemTiddlerClass + state.tiddlerTagClasses;\n\treturn state;\n};\n\n/*\nCreate a string of CSS classes derived from the tags of the current tiddler\n*/\nTiddlerWidget.prototype.getTagClasses = function() {\n\tvar tiddler = this.wiki.getTiddler(this.tiddlerTitle);\n\tif(tiddler) {\n\t\tvar tags = [];\n\t\t$tw.utils.each(tiddler.fields.tags,function(tag) {\n\t\t\ttags.push(\"tc-tagged-\" + encodeURIComponent(tag));\n\t\t});\n\t\treturn tags.join(\" \");\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nTiddlerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\tnewTiddlerState = this.computeTiddlerState();\n\tif(changedAttributes.tiddler || newTiddlerState.hash !== this.tiddlerState.hash) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.tiddler = TiddlerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/tiddler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/transclude.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/transclude.js\ntype: application/javascript\nmodule-type: widget\n\nTransclude widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TranscludeWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTranscludeWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTranscludeWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTranscludeWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.transcludeTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.transcludeSubTiddler = this.getAttribute(\"subtiddler\");\n\tthis.transcludeField = this.getAttribute(\"field\");\n\tthis.transcludeIndex = this.getAttribute(\"index\");\n\tthis.transcludeMode = this.getAttribute(\"mode\");\n\t// Parse the text reference\n\tvar parseAsInline = !this.parseTreeNode.isBlock;\n\tif(this.transcludeMode === \"inline\") {\n\t\tparseAsInline = true;\n\t} else if(this.transcludeMode === \"block\") {\n\t\tparseAsInline = false;\n\t}\n\tvar parser = this.wiki.parseTextReference(\n\t\t\t\t\t\tthis.transcludeTitle,\n\t\t\t\t\t\tthis.transcludeField,\n\t\t\t\t\t\tthis.transcludeIndex,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tparseAsInline: parseAsInline,\n\t\t\t\t\t\t\tsubTiddler: this.transcludeSubTiddler\n\t\t\t\t\t\t}),\n\t\tparseTreeNodes = parser ? parser.tree : this.parseTreeNode.children;\n\t// Set context variables for recursion detection\n\tvar recursionMarker = this.makeRecursionMarker();\n\tthis.setVariable(\"transclusion\",recursionMarker);\n\t// Check for recursion\n\tif(parser) {\n\t\tif(this.parentWidget && this.parentWidget.hasVariable(\"transclusion\",recursionMarker)) {\n\t\t\tparseTreeNodes = [{type: \"element\", tag: \"span\", attributes: {\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-error\"}\n\t\t\t}, children: [\n\t\t\t\t{type: \"text\", text: $tw.language.getString(\"Error/RecursiveTransclusion\")}\n\t\t\t]}];\n\t\t}\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nCompose a string comprising the title, field and/or index to identify this transclusion for recursion detection\n*/\nTranscludeWidget.prototype.makeRecursionMarker = function() {\n\tvar output = [];\n\toutput.push(\"{\");\n\toutput.push(this.getVariable(\"currentTiddler\",{defaultValue: \"\"}));\n\toutput.push(\"|\");\n\toutput.push(this.transcludeTitle || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeField || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeIndex || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeSubTiddler || \"\");\n\toutput.push(\"}\");\n\treturn output.join(\"\");\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nTranscludeWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedTiddlers[this.transcludeTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.transclude = TranscludeWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/transclude.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/vars.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/vars.js\ntype: application/javascript\nmodule-type: widget\n\nThis widget allows multiple variables to be set in one go:\n\n```\n\\define helloworld() Hello world!\n<$vars greeting=\"Hi\" me={{!!title}} sentence=<<helloworld>>>\n  <<greeting>>! I am <<me>> and I say: <<sentence>>\n</$vars>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar VarsWidget = function(parseTreeNode,options) {\n\t// Call the constructor\n\tWidget.call(this);\n\t// Initialise\t\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nVarsWidget.prototype = Object.create(Widget.prototype);\n\n/*\nRender this widget into the DOM\n*/\nVarsWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nVarsWidget.prototype.execute = function() {\n\t// Parse variables\n\tvar self = this;\n\t$tw.utils.each(this.attributes,function(val,key) {\n\t\tif(key.charAt(0) !== \"$\") {\n\t\t\tself.setVariable(key,val);\n\t\t}\n\t});\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nVarsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(Object.keys(changedAttributes).length) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports[\"vars\"] = VarsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/vars.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/view.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/view.js\ntype: application/javascript\nmodule-type: widget\n\nView widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ViewWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nViewWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nViewWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tif(this.text) {\n\t\tvar textNode = this.document.createTextNode(this.text);\n\t\tparent.insertBefore(textNode,nextSibling);\n\t\tthis.domNodes.push(textNode);\n\t} else {\n\t\tthis.makeChildWidgets();\n\t\tthis.renderChildren(parent,nextSibling);\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nViewWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.viewTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.viewSubtiddler = this.getAttribute(\"subtiddler\");\n\tthis.viewField = this.getAttribute(\"field\",\"text\");\n\tthis.viewIndex = this.getAttribute(\"index\");\n\tthis.viewFormat = this.getAttribute(\"format\",\"text\");\n\tthis.viewTemplate = this.getAttribute(\"template\",\"\");\n\tswitch(this.viewFormat) {\n\t\tcase \"htmlwikified\":\n\t\t\tthis.text = this.getValueAsHtmlWikified();\n\t\t\tbreak;\n\t\tcase \"plainwikified\":\n\t\t\tthis.text = this.getValueAsPlainWikified();\n\t\t\tbreak;\n\t\tcase \"htmlencodedplainwikified\":\n\t\t\tthis.text = this.getValueAsHtmlEncodedPlainWikified();\n\t\t\tbreak;\n\t\tcase \"htmlencoded\":\n\t\t\tthis.text = this.getValueAsHtmlEncoded();\n\t\t\tbreak;\n\t\tcase \"urlencoded\":\n\t\t\tthis.text = this.getValueAsUrlEncoded();\n\t\t\tbreak;\n\t\tcase \"doubleurlencoded\":\n\t\t\tthis.text = this.getValueAsDoubleUrlEncoded();\n\t\t\tbreak;\n\t\tcase \"date\":\n\t\t\tthis.text = this.getValueAsDate(this.viewTemplate);\n\t\t\tbreak;\n\t\tcase \"relativedate\":\n\t\t\tthis.text = this.getValueAsRelativeDate();\n\t\t\tbreak;\n\t\tcase \"stripcomments\":\n\t\t\tthis.text = this.getValueAsStrippedComments();\n\t\t\tbreak;\n\t\tcase \"jsencoded\":\n\t\t\tthis.text = this.getValueAsJsEncoded();\n\t\t\tbreak;\n\t\tdefault: // \"text\"\n\t\t\tthis.text = this.getValueAsText();\n\t\t\tbreak;\n\t}\n};\n\n/*\nThe various formatter functions are baked into this widget for the moment. Eventually they will be replaced by macro functions\n*/\n\n/*\nRetrieve the value of the widget. Options are:\nasString: Optionally return the value as a string\n*/\nViewWidget.prototype.getValue = function(options) {\n\toptions = options || {};\n\tvar value = options.asString ? 
\"\" : undefined;\n\tif(this.viewIndex) {\n\t\tvalue = this.wiki.extractTiddlerDataItem(this.viewTitle,this.viewIndex);\n\t} else {\n\t\tvar tiddler;\n\t\tif(this.viewSubtiddler) {\n\t\t\ttiddler = this.wiki.getSubTiddler(this.viewTitle,this.viewSubtiddler);\t\n\t\t} else {\n\t\t\ttiddler = this.wiki.getTiddler(this.viewTitle);\n\t\t}\n\t\tif(tiddler) {\n\t\t\tif(this.viewField === \"text\" && !this.viewSubtiddler) {\n\t\t\t\t// Calling getTiddlerText() triggers lazy loading of skinny tiddlers\n\t\t\t\tvalue = this.wiki.getTiddlerText(this.viewTitle);\n\t\t\t} else {\n\t\t\t\tif($tw.utils.hop(tiddler.fields,this.viewField)) {\n\t\t\t\t\tif(options.asString) {\n\t\t\t\t\t\tvalue = tiddler.getFieldString(this.viewField);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvalue = tiddler.fields[this.viewField];\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif(this.viewField === \"title\") {\n\t\t\t\tvalue = this.viewTitle;\n\t\t\t}\n\t\t}\n\t}\n\treturn value;\n};\n\nViewWidget.prototype.getValueAsText = function() {\n\treturn this.getValue({asString: true});\n};\n\nViewWidget.prototype.getValueAsHtmlWikified = function() {\n\treturn this.wiki.renderText(\"text/html\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this});\n};\n\nViewWidget.prototype.getValueAsPlainWikified = function() {\n\treturn this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this});\n};\n\nViewWidget.prototype.getValueAsHtmlEncodedPlainWikified = function() {\n\treturn $tw.utils.htmlEncode(this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this}));\n};\n\nViewWidget.prototype.getValueAsHtmlEncoded = function() {\n\treturn $tw.utils.htmlEncode(this.getValueAsText());\n};\n\nViewWidget.prototype.getValueAsUrlEncoded = function() {\n\treturn encodeURIComponent(this.getValueAsText());\n};\n\nViewWidget.prototype.getValueAsDoubleUrlEncoded = function() {\n\treturn encodeURIComponent(encodeURIComponent(this.getValueAsText()));\n};\n\nViewWidget.prototype.getValueAsDate = function(format) {\n\tformat = format || \"YYYY MM DD 0hh:0mm\";\n\tvar value = $tw.utils.parseDate(this.getValue());\n\tif(value && $tw.utils.isDate(value) && value.toString() !== \"Invalid Date\") {\n\t\treturn $tw.utils.formatDateString(value,format);\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\nViewWidget.prototype.getValueAsRelativeDate = function(format) {\n\tvar value = $tw.utils.parseDate(this.getValue());\n\tif(value && $tw.utils.isDate(value) && value.toString() !== \"Invalid Date\") {\n\t\treturn $tw.utils.getRelativeDate((new Date()) - (new Date(value))).description;\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\nViewWidget.prototype.getValueAsStrippedComments = function() {\n\tvar lines = this.getValueAsText().split(\"\\n\"),\n\t\tout = [];\n\tfor(var line=0; line<lines.length; line++) {\n\t\tvar text = lines[line];\n\t\tif(!/^\\s*\\/\\/#/.test(text)) {\n\t\t\tout.push(text);\n\t\t}\n\t}\n\treturn out.join(\"\\n\");\n};\n\nViewWidget.prototype.getValueAsJsEncoded = function() {\n\treturn $tw.utils.stringify(this.getValueAsText());\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nViewWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes.template || changedAttributes.format || changedTiddlers[this.viewTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.view = ViewWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/view.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/widget.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/widget.js\ntype: application/javascript\nmodule-type: widget\n\nWidget base class\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreate a widget object for a parse tree node\n\tparseTreeNode: reference to the parse tree node to be rendered\n\toptions: see below\nOptions include:\n\twiki: mandatory reference to wiki associated with this render tree\n\tparentWidget: optional reference to a parent renderer node for the context chain\n\tdocument: optional document object to use instead of global document\n*/\nvar Widget = function(parseTreeNode,options) {\n\tif(arguments.length > 0) {\n\t\tthis.initialise(parseTreeNode,options);\n\t}\n};\n\n/*\nInitialise widget properties. These steps are pulled out of the constructor so that we can reuse them in subclasses\n*/\nWidget.prototype.initialise = function(parseTreeNode,options) {\n\toptions = options || {};\n\t// Save widget info\n\tthis.parseTreeNode = parseTreeNode;\n\tthis.wiki = options.wiki;\n\tthis.parentWidget = options.parentWidget;\n\tthis.variablesConstructor = function() {};\n\tthis.variablesConstructor.prototype = this.parentWidget ? this.parentWidget.variables : {};\n\tthis.variables = new this.variablesConstructor();\n\tthis.document = options.document;\n\tthis.attributes = {};\n\tthis.children = [];\n\tthis.domNodes = [];\n\tthis.eventListeners = {};\n\t// Hashmap of the widget classes\n\tif(!this.widgetClasses) {\n\t\tWidget.prototype.widgetClasses = $tw.modules.applyMethods(\"widget\");\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nWidget.prototype.execute = function() {\n\tthis.makeChildWidgets();\n};\n\n/*\nSet the value of a context variable\nname: name of the variable\nvalue: value of the variable\nparams: array of {name:, default:} for each parameter\n*/\nWidget.prototype.setVariable = function(name,value,params) {\n\tthis.variables[name] = {value: value, params: params};\n};\n\n/*\nGet the prevailing value of a context variable\nname: name of variable\noptions: see below\nOptions include\nparams: array of {name:, value:} for each parameter\ndefaultValue: default value if the variable is not defined\n*/\nWidget.prototype.getVariable = function(name,options) {\n\toptions = options || {};\n\tvar actualParams = options.params || [],\n\t\tparentWidget = this.parentWidget;\n\t// Check for the variable defined in the parent widget (or an ancestor in the prototype chain)\n\tif(parentWidget && name in parentWidget.variables) {\n\t\tvar variable = parentWidget.variables[name],\n\t\t\tvalue = variable.value;\n\t\t// Substitute any parameters specified in the definition\n\t\tvalue = this.substituteVariableParameters(value,variable.params,actualParams);\n\t\tvalue = this.substituteVariableReferences(value);\n\t\treturn value;\n\t}\n\t// If the variable doesn't exist in the parent widget then look for a macro module\n\treturn this.evaluateMacroModule(name,actualParams,options.defaultValue);\n};\n\nWidget.prototype.substituteVariableParameters = function(text,formalParams,actualParams) {\n\tif(formalParams) {\n\t\tvar nextAnonParameter = 0, // Next candidate anonymous parameter in macro call\n\t\t\tparamInfo, paramValue;\n\t\t// Step through each of the parameters in the macro definition\n\t\tfor(var p=0; 
p<formalParams.length; p++) {\n\t\t\t// Check if we've got a macro call parameter with the same name\n\t\t\tparamInfo = formalParams[p];\n\t\t\tparamValue = undefined;\n\t\t\tfor(var m=0; m<actualParams.length; m++) {\n\t\t\t\tif(actualParams[m].name === paramInfo.name) {\n\t\t\t\t\tparamValue = actualParams[m].value;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If not, use the next available anonymous macro call parameter\n\t\t\twhile(nextAnonParameter < actualParams.length && actualParams[nextAnonParameter].name) {\n\t\t\t\tnextAnonParameter++;\n\t\t\t}\n\t\t\tif(paramValue === undefined && nextAnonParameter < actualParams.length) {\n\t\t\t\tparamValue = actualParams[nextAnonParameter++].value;\n\t\t\t}\n\t\t\t// If we've still not got a value, use the default, if any\n\t\t\tparamValue = paramValue || paramInfo[\"default\"] || \"\";\n\t\t\t// Replace any instances of this parameter\n\t\t\ttext = text.replace(new RegExp(\"\\\\$\" + $tw.utils.escapeRegExp(paramInfo.name) + \"\\\\$\",\"mg\"),paramValue);\n\t\t}\n\t}\n\treturn text;\n};\n\nWidget.prototype.substituteVariableReferences = function(text) {\n\tvar self = this;\n\treturn (text || \"\").replace(/\\$\\(([^\\)\\$]+)\\)\\$/g,function(match,p1,offset,string) {\n\t\treturn self.getVariable(p1,{defaultValue: \"\"});\n\t});\n};\n\nWidget.prototype.evaluateMacroModule = function(name,actualParams,defaultValue) {\n\tif($tw.utils.hop($tw.macros,name)) {\n\t\tvar macro = $tw.macros[name],\n\t\t\targs = [];\n\t\tif(macro.params.length > 0) {\n\t\t\tvar nextAnonParameter = 0, // Next candidate anonymous parameter in macro call\n\t\t\t\tparamInfo, paramValue;\n\t\t\t// Step through each of the parameters in the macro definition\n\t\t\tfor(var p=0; p<macro.params.length; p++) {\n\t\t\t\t// Check if we've got a macro call parameter with the same name\n\t\t\t\tparamInfo = macro.params[p];\n\t\t\t\tparamValue = undefined;\n\t\t\t\tfor(var m=0; m<actualParams.length; m++) {\n\t\t\t\t\tif(actualParams[m].name === paramInfo.name) {\n\t\t\t\t\t\tparamValue = actualParams[m].value;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// If not, use the next available anonymous macro call parameter\n\t\t\t\twhile(nextAnonParameter < actualParams.length && actualParams[nextAnonParameter].name) {\n\t\t\t\t\tnextAnonParameter++;\n\t\t\t\t}\n\t\t\t\tif(paramValue === undefined && nextAnonParameter < actualParams.length) {\n\t\t\t\t\tparamValue = actualParams[nextAnonParameter++].value;\n\t\t\t\t}\n\t\t\t\t// If we've still not got a value, use the default, if any\n\t\t\t\tparamValue = paramValue || paramInfo[\"default\"] || \"\";\n\t\t\t\t// Save the parameter\n\t\t\t\targs.push(paramValue);\n\t\t\t}\n\t\t}\n\t\telse for(var i=0; i<actualParams.length; ++i) {\n\t\t\targs.push(actualParams[i].value);\n\t\t}\n\t\treturn (macro.run.apply(this,args) || \"\").toString();\n\t} else {\n\t\treturn defaultValue;\n\t}\n};\n\n/*\nCheck whether a given context variable value exists in the parent chain\n*/\nWidget.prototype.hasVariable = function(name,value) {\n\tvar node = this;\n\twhile(node) {\n\t\tif($tw.utils.hop(node.variables,name) && node.variables[name].value === value) {\n\t\t\treturn true;\n\t\t}\n\t\tnode = node.parentWidget;\n\t}\n\treturn false;\n};\n\n/*\nConstruct a qualifying string based on a hash of concatenating the values of a given variable in the parent chain\n*/\nWidget.prototype.getStateQualifier = function(name) {\n\tthis.qualifiers = this.qualifiers || Object.create(null);\n\tname = name || \"transclusion\";\n\tif(this.qualifiers[name]) {\n\t\treturn this.qualifiers[name];\n\t} 
else {\n\t\tvar output = [],\n\t\t\tnode = this;\n\t\twhile(node && node.parentWidget) {\n\t\t\tif($tw.utils.hop(node.parentWidget.variables,name)) {\n\t\t\t\toutput.push(node.getVariable(name));\n\t\t\t}\n\t\t\tnode = node.parentWidget;\n\t\t}\n\t\tvar value = $tw.utils.hashString(output.join(\"\"));\n\t\tthis.qualifiers[name] = value;\n\t\treturn value;\n\t}\n};\n\n/*\nCompute the current values of the attributes of the widget. Returns a hashmap of the names of the attributes that have changed\n*/\nWidget.prototype.computeAttributes = function() {\n\tvar changedAttributes = {},\n\t\tself = this,\n\t\tvalue;\n\t$tw.utils.each(this.parseTreeNode.attributes,function(attribute,name) {\n\t\tif(attribute.type === \"indirect\") {\n\t\t\tvalue = self.wiki.getTextReference(attribute.textReference,\"\",self.getVariable(\"currentTiddler\"));\n\t\t} else if(attribute.type === \"macro\") {\n\t\t\tvalue = self.getVariable(attribute.value.name,{params: attribute.value.params});\n\t\t} else { // String attribute\n\t\t\tvalue = attribute.value;\n\t\t}\n\t\t// Check whether the attribute has changed\n\t\tif(self.attributes[name] !== value) {\n\t\t\tself.attributes[name] = value;\n\t\t\tchangedAttributes[name] = true;\n\t\t}\n\t});\n\treturn changedAttributes;\n};\n\n/*\nCheck for the presence of an attribute\n*/\nWidget.prototype.hasAttribute = function(name) {\n\treturn $tw.utils.hop(this.attributes,name);\n};\n\n/*\nGet the value of an attribute\n*/\nWidget.prototype.getAttribute = function(name,defaultText) {\n\tif($tw.utils.hop(this.attributes,name)) {\n\t\treturn this.attributes[name];\n\t} else {\n\t\treturn defaultText;\n\t}\n};\n\n/*\nAssign the computed attributes of the widget to a domNode\noptions include:\nexcludeEventAttributes: ignores attributes whose name begins with \"on\"\n*/\nWidget.prototype.assignAttributes = function(domNode,options) {\n\toptions = options || {};\n\tvar self = this;\n\t$tw.utils.each(this.attributes,function(v,a) {\n\t\t// Check exclusions\n\t\tif(options.excludeEventAttributes && a.substr(0,2) === \"on\") {\n\t\t\tv = undefined;\n\t\t}\n\t\tif(v !== undefined) {\n\t\t\tvar b = a.split(\":\");\n\t\t\t// Setting certain attributes can cause a DOM error (eg xmlns on the svg element)\n\t\t\ttry {\n\t\t\t\tif (b.length == 2 && b[0] == \"xlink\"){\n\t\t\t\t\tdomNode.setAttributeNS(\"http://www.w3.org/1999/xlink\",b[1],v);\n\t\t\t\t} else {\n\t\t\t\t\tdomNode.setAttributeNS(null,a,v);\n\t\t\t\t}\n\t\t\t} catch(e) {\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nMake child widgets correspondng to specified parseTreeNodes\n*/\nWidget.prototype.makeChildWidgets = function(parseTreeNodes) {\n\tthis.children = [];\n\tvar self = this;\n\t$tw.utils.each(parseTreeNodes || (this.parseTreeNode && this.parseTreeNode.children),function(childNode) {\n\t\tself.children.push(self.makeChildWidget(childNode));\n\t});\n};\n\n/*\nConstruct the widget object for a parse tree node\n*/\nWidget.prototype.makeChildWidget = function(parseTreeNode) {\n\tvar WidgetClass = this.widgetClasses[parseTreeNode.type];\n\tif(!WidgetClass) {\n\t\tWidgetClass = this.widgetClasses.text;\n\t\tparseTreeNode = {type: \"text\", text: \"Undefined widget '\" + parseTreeNode.type + \"'\"};\n\t}\n\treturn new WidgetClass(parseTreeNode,{\n\t\twiki: this.wiki,\n\t\tvariables: {},\n\t\tparentWidget: this,\n\t\tdocument: this.document\n\t});\n};\n\n/*\nGet the next sibling of this widget\n*/\nWidget.prototype.nextSibling = function() {\n\tif(this.parentWidget) {\n\t\tvar index = this.parentWidget.children.indexOf(this);\n\t\tif(index 
!== -1 && index < this.parentWidget.children.length-1) {\n\t\t\treturn this.parentWidget.children[index+1];\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nGet the previous sibling of this widget\n*/\nWidget.prototype.previousSibling = function() {\n\tif(this.parentWidget) {\n\t\tvar index = this.parentWidget.children.indexOf(this);\n\t\tif(index !== -1 && index > 0) {\n\t\t\treturn this.parentWidget.children[index-1];\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRender the children of this widget into the DOM\n*/\nWidget.prototype.renderChildren = function(parent,nextSibling) {\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\tchildWidget.render(parent,nextSibling);\n\t});\n};\n\n/*\nAdd a list of event listeners from an array [{type:,handler:},...]\n*/\nWidget.prototype.addEventListeners = function(listeners) {\n\tvar self = this;\n\t$tw.utils.each(listeners,function(listenerInfo) {\n\t\tself.addEventListener(listenerInfo.type,listenerInfo.handler);\n\t});\n};\n\n/*\nAdd an event listener\n*/\nWidget.prototype.addEventListener = function(type,handler) {\n\tvar self = this;\n\tif(typeof handler === \"string\") { // The handler is a method name on this widget\n\t\tthis.eventListeners[type] = function(event) {\n\t\t\treturn self[handler].call(self,event);\n\t\t};\n\t} else { // The handler is a function\n\t\tthis.eventListeners[type] = function(event) {\n\t\t\treturn handler.call(self,event);\n\t\t};\n\t}\n};\n\n/*\nDispatch an event to a widget. If the widget doesn't handle the event then it is also dispatched to the parent widget\n*/\nWidget.prototype.dispatchEvent = function(event) {\n\t// Dispatch the event if this widget handles it\n\tvar listener = this.eventListeners[event.type];\n\tif(listener) {\n\t\t// Don't propagate the event if the listener returned false\n\t\tif(!listener(event)) {\n\t\t\treturn false;\n\t\t}\n\t}\n\t// Dispatch the event to the parent widget\n\tif(this.parentWidget) {\n\t\treturn this.parentWidget.dispatchEvent(event);\n\t}\n\treturn true;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nRebuild a previously rendered widget\n*/\nWidget.prototype.refreshSelf = function() {\n\tvar nextSibling = this.findNextSiblingDomNode();\n\tthis.removeChildDomNodes();\n\tthis.render(this.parentDomNode,nextSibling);\n};\n\n/*\nRefresh all the children of a widget\n*/\nWidget.prototype.refreshChildren = function(changedTiddlers) {\n\tvar self = this,\n\t\trefreshed = false;\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\trefreshed = childWidget.refresh(changedTiddlers) || refreshed;\n\t});\n\treturn refreshed;\n};\n\n/*\nFind the next sibling in the DOM to this widget. This is done by scanning the widget tree through all next siblings and their descendents that share the same parent DOM node\n*/\nWidget.prototype.findNextSiblingDomNode = function(startIndex) {\n\t// Refer to this widget by its index within its parents children\n\tvar parent = this.parentWidget,\n\t\tindex = startIndex !== undefined ? 
startIndex : parent.children.indexOf(this);\nif(index === -1) {\n\tthrow \"node not found in parents children\";\n}\n\t// Look for a DOM node in the later siblings\n\twhile(++index < parent.children.length) {\n\t\tvar domNode = parent.children[index].findFirstDomNode();\n\t\tif(domNode) {\n\t\t\treturn domNode;\n\t\t}\n\t}\n\t// Go back and look for later siblings of our parent if it has the same parent dom node\n\tvar grandParent = parent.parentWidget;\n\tif(grandParent && parent.parentDomNode === this.parentDomNode) {\n\t\tindex = grandParent.children.indexOf(parent);\n\t\tif(index !== -1) {\n\t\t\treturn parent.findNextSiblingDomNode(index);\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nFind the first DOM node generated by a widget or its children\n*/\nWidget.prototype.findFirstDomNode = function() {\n\t// Return the first dom node of this widget, if we've got one\n\tif(this.domNodes.length > 0) {\n\t\treturn this.domNodes[0];\n\t}\n\t// Otherwise, recursively call our children\n\tfor(var t=0; t<this.children.length; t++) {\n\t\tvar domNode = this.children[t].findFirstDomNode();\n\t\tif(domNode) {\n\t\t\treturn domNode;\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRemove any DOM nodes created by this widget or its children\n*/\nWidget.prototype.removeChildDomNodes = function() {\n\t// If this widget has directly created DOM nodes, delete them and exit. This assumes that any child widgets are contained within the created DOM nodes, which would normally be the case\n\tif(this.domNodes.length > 0) {\n\t\t$tw.utils.each(this.domNodes,function(domNode) {\n\t\t\tdomNode.parentNode.removeChild(domNode);\n\t\t});\n\t\tthis.domNodes = [];\n\t} else {\n\t\t// Otherwise, ask the child widgets to delete their DOM nodes\n\t\t$tw.utils.each(this.children,function(childWidget) {\n\t\t\tchildWidget.removeChildDomNodes();\n\t\t});\n\t}\n};\n\n/*\nInvoke the action widgets that are descendents of the current widget.\n*/\nWidget.prototype.invokeActions = function(triggeringWidget,event) {\n\tvar handled = false;\n\t// For each child widget\n\tfor(var t=0; t<this.children.length; t++) {\n\t\tvar child = this.children[t];\n\t\t// Invoke the child if it is an action widget\n\t\tif(child.invokeAction && child.invokeAction(triggeringWidget,event)) {\n\t\t\thandled = true;\n\t\t}\n\t\t// Propagate through through the child if it permits it\n\t\tif(child.allowActionPropagation() && child.invokeActions(triggeringWidget,event)) {\n\t\t\thandled = true;\n\t\t}\n\t}\n\treturn handled;\n};\n\n/*\nInvoke the action widgets defined in a string\n*/\nWidget.prototype.invokeActionString = function(actions,triggeringWidget,event) {\n\tactions = actions || \"\";\n\tvar parser = this.wiki.parseText(\"text/vnd.tiddlywiki\",actions,{\n\t\t\tparentWidget: this,\n\t\t\tdocument: this.document\n\t\t}),\n\t\twidgetNode = this.wiki.makeWidget(parser,{\n\t\t\tparentWidget: this,\n\t\t\tdocument: this.document\n\t\t});\n\tvar container = this.document.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn widgetNode.invokeActions(this,event);\n};\n\nWidget.prototype.allowActionPropagation = function() {\n\treturn true;\n};\n\nexports.widget = Widget;\n\n})();\n",
            "title": "$:/core/modules/widgets/widget.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/wikify.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/wikify.js\ntype: application/javascript\nmodule-type: widget\n\nWidget to wikify text into a variable\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar WikifyWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nWikifyWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nWikifyWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nWikifyWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.wikifyName = this.getAttribute(\"name\");\n\tthis.wikifyText = this.getAttribute(\"text\");\n\tthis.wikifyType = this.getAttribute(\"type\");\n\tthis.wikifyMode = this.getAttribute(\"mode\",\"block\");\n\tthis.wikifyOutput = this.getAttribute(\"output\",\"text\");\n\t// Create the parse tree\n\tthis.wikifyParser = this.wiki.parseText(this.wikifyType,this.wikifyText,{\n\t\t\tparseAsInline: this.wikifyMode === \"inline\"\n\t\t});\n\t// Create the widget tree \n\tthis.wikifyWidgetNode = this.wiki.makeWidget(this.wikifyParser,{\n\t\t\tdocument: $tw.fakeDocument,\n\t\t\tparentWidget: this\n\t\t});\n\t// Render the widget tree to the container\n\tthis.wikifyContainer = $tw.fakeDocument.createElement(\"div\");\n\tthis.wikifyWidgetNode.render(this.wikifyContainer,null);\n\tthis.wikifyResult = this.getResult();\n\t// Set context variable\n\tthis.setVariable(this.wikifyName,this.wikifyResult);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nReturn the result string\n*/\nWikifyWidget.prototype.getResult = function() {\n\tvar result;\n\tswitch(this.wikifyOutput) {\n\t\tcase \"text\":\n\t\t\tresult = this.wikifyContainer.textContent;\n\t\t\tbreak;\n\t\tcase \"html\":\n\t\t\tresult = this.wikifyContainer.innerHTML;\n\t\t\tbreak;\n\t\tcase \"parsetree\":\n\t\t\tresult = JSON.stringify(this.wikifyParser.tree,0,$tw.config.preferences.jsonSpaces);\n\t\t\tbreak;\n\t\tcase \"widgettree\":\n\t\t\tresult = JSON.stringify(this.getWidgetTree(),0,$tw.config.preferences.jsonSpaces);\n\t\t\tbreak;\n\t}\n\treturn result;\n};\n\n/*\nReturn a string of the widget tree\n*/\nWikifyWidget.prototype.getWidgetTree = function() {\n\tvar copyNode = function(widgetNode,resultNode) {\n\t\t\tvar type = widgetNode.parseTreeNode.type;\n\t\t\tresultNode.type = type;\n\t\t\tswitch(type) {\n\t\t\t\tcase \"element\":\n\t\t\t\t\tresultNode.tag = widgetNode.parseTreeNode.tag;\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"text\":\n\t\t\t\t\tresultNode.text = widgetNode.parseTreeNode.text;\n\t\t\t\t\tbreak;\t\n\t\t\t}\n\t\t\tif(Object.keys(widgetNode.attributes || {}).length > 0) {\n\t\t\t\tresultNode.attributes = {};\n\t\t\t\t$tw.utils.each(widgetNode.attributes,function(attr,attrName) {\n\t\t\t\t\tresultNode.attributes[attrName] = widgetNode.getAttribute(attrName);\n\t\t\t\t});\n\t\t\t}\n\t\t\tif(Object.keys(widgetNode.children || {}).length > 0) {\n\t\t\t\tresultNode.children = [];\n\t\t\t\t$tw.utils.each(widgetNode.children,function(widgetChildNode) {\n\t\t\t\t\tvar node = {};\n\t\t\t\t\tresultNode.children.push(node);\n\t\t\t\t\tcopyNode(widgetChildNode,node);\n\t\t\t\t});\n\t\t\t}\n\t\t},\n\t\tresults = 
{};\n\tcopyNode(this.wikifyWidgetNode,results);\n\treturn results;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nWikifyWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// Refresh ourselves entirely if any of our attributes have changed\n\tif(changedAttributes.name || changedAttributes.text || changedAttributes.type || changedAttributes.mode || changedAttributes.output) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\t// Refresh the widget tree\n\t\tif(this.wikifyWidgetNode.refresh(changedTiddlers)) {\n\t\t\t// Check if there was any change\n\t\t\tvar result = this.getResult();\n\t\t\tif(result !== this.wikifyResult) {\n\t\t\t\t// If so, save the change\n\t\t\t\tthis.wikifyResult = result;\n\t\t\t\tthis.setVariable(this.wikifyName,this.wikifyResult);\n\t\t\t\t// Refresh each of our child widgets\n\t\t\t\t$tw.utils.each(this.children,function(childWidget) {\n\t\t\t\t\tchildWidget.refreshSelf();\n\t\t\t\t});\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t\t// Just refresh the children\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.wikify = WikifyWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/wikify.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/wiki-bulkops.js": {
            "text": "/*\\\ntitle: $:/core/modules/wiki-bulkops.js\ntype: application/javascript\nmodule-type: wikimethod\n\nBulk tiddler operations such as rename.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nRename a tiddler, and relink any tags or lists that reference it.\n*/\nexports.renameTiddler = function(fromTitle,toTitle) {\n\tvar self = this;\n\tfromTitle = (fromTitle || \"\").trim();\n\ttoTitle = (toTitle || \"\").trim();\n\tif(fromTitle && toTitle && fromTitle !== toTitle) {\n\t\t// Rename the tiddler itself\n\t\tvar tiddler = this.getTiddler(fromTitle);\n\t\tthis.addTiddler(new $tw.Tiddler(tiddler,{title: toTitle},this.getModificationFields()));\n\t\tthis.deleteTiddler(fromTitle);\n\t\t// Rename any tags or lists that reference it\n\t\tthis.each(function(tiddler,title) {\n\t\t\tvar tags = (tiddler.fields.tags || []).slice(0),\n\t\t\t\tlist = (tiddler.fields.list || []).slice(0),\n\t\t\t\tisModified = false;\n\t\t\t// Rename tags\n\t\t\t$tw.utils.each(tags,function (title,index) {\n\t\t\t\tif(title === fromTitle) {\n\t\t\t\t\ttags[index] = toTitle;\n\t\t\t\t\tisModified = true;\n\t\t\t\t}\n\t\t\t});\n\t\t\t// Rename lists\n\t\t\t$tw.utils.each(list,function (title,index) {\n\t\t\t\tif(title === fromTitle) {\n\t\t\t\t\tlist[index] = toTitle;\n\t\t\t\t\tisModified = true;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif(isModified) {\n\t\t\t\tself.addTiddler(new $tw.Tiddler(tiddler,{tags: tags, list: list},self.getModificationFields()));\n\t\t\t}\n\t\t});\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/wiki-bulkops.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/core/modules/wiki.js": {
            "text": "/*\\\ntitle: $:/core/modules/wiki.js\ntype: application/javascript\nmodule-type: wikimethod\n\nExtension methods for the $tw.Wiki object\n\nAdds the following properties to the wiki object:\n\n* `eventListeners` is a hashmap by type of arrays of listener functions\n* `changedTiddlers` is a hashmap describing changes to named tiddlers since wiki change events were last dispatched. Each entry is a hashmap containing two fields:\n\tmodified: true/false\n\tdeleted: true/false\n* `changeCount` is a hashmap by tiddler title containing a numerical index that starts at zero and is incremented each time a tiddler is created changed or deleted\n* `caches` is a hashmap by tiddler title containing a further hashmap of named cache objects. Caches are automatically cleared when a tiddler is modified or deleted\n* `globalCache` is a hashmap by cache name of cache objects that are cleared whenever any tiddler change occurs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar USER_NAME_TITLE = \"$:/status/UserName\";\n\n/*\nGet the value of a text reference. Text references can have any of these forms:\n\t<tiddlertitle>\n\t<tiddlertitle>!!<fieldname>\n\t!!<fieldname> - specifies a field of the current tiddlers\n\t<tiddlertitle>##<index>\n*/\nexports.getTextReference = function(textRef,defaultText,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle = tr.title || currTiddlerTitle;\n\tif(tr.field) {\n\t\tvar tiddler = this.getTiddler(title);\n\t\tif(tr.field === \"title\") { // Special case so we can return the title of a non-existent tiddler\n\t\t\treturn title;\n\t\t} else if(tiddler && $tw.utils.hop(tiddler.fields,tr.field)) {\n\t\t\treturn tiddler.getFieldString(tr.field);\n\t\t} else {\n\t\t\treturn defaultText;\n\t\t}\n\t} else if(tr.index) {\n\t\treturn this.extractTiddlerDataItem(title,tr.index,defaultText);\n\t} else {\n\t\treturn this.getTiddlerText(title,defaultText);\n\t}\n};\n\nexports.setTextReference = function(textRef,value,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle = tr.title || currTiddlerTitle;\n\tthis.setText(title,tr.field,tr.index,value);\n};\n\nexports.setText = function(title,field,index,value,options) {\n\toptions = options || {};\n\tvar creationFields = options.suppressTimestamp ? {} : this.getCreationFields(),\n\t\tmodificationFields = options.suppressTimestamp ? 
{} : this.getModificationFields();\n\t// Check if it is a reference to a tiddler field\n\tif(index) {\n\t\tvar data = this.getTiddlerData(title,Object.create(null));\n\t\tif(value !== undefined) {\n\t\t\tdata[index] = value;\n\t\t} else {\n\t\t\tdelete data[index];\n\t\t}\n\t\tthis.setTiddlerData(title,data,modificationFields);\n\t} else {\n\t\tvar tiddler = this.getTiddler(title),\n\t\t\tfields = {title: title};\n\t\tfields[field || \"text\"] = value;\n\t\tthis.addTiddler(new $tw.Tiddler(creationFields,tiddler,fields,modificationFields));\n\t}\n};\n\nexports.deleteTextReference = function(textRef,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle,tiddler,fields;\n\t// Check if it is a reference to a tiddler\n\tif(tr.title && !tr.field) {\n\t\tthis.deleteTiddler(tr.title);\n\t// Else check for a field reference\n\t} else if(tr.field) {\n\t\ttitle = tr.title || currTiddlerTitle;\n\t\ttiddler = this.getTiddler(title);\n\t\tif(tiddler && $tw.utils.hop(tiddler.fields,tr.field)) {\n\t\t\tfields = Object.create(null);\n\t\t\tfields[tr.field] = undefined;\n\t\t\tthis.addTiddler(new $tw.Tiddler(tiddler,fields,this.getModificationFields()));\n\t\t}\n\t}\n};\n\nexports.addEventListener = function(type,listener) {\n\tthis.eventListeners = this.eventListeners || {};\n\tthis.eventListeners[type] = this.eventListeners[type]  || [];\n\tthis.eventListeners[type].push(listener);\t\n};\n\nexports.removeEventListener = function(type,listener) {\n\tvar listeners = this.eventListeners[type];\n\tif(listeners) {\n\t\tvar p = listeners.indexOf(listener);\n\t\tif(p !== -1) {\n\t\t\tlisteners.splice(p,1);\n\t\t}\n\t}\n};\n\nexports.dispatchEvent = function(type /*, args */) {\n\tvar args = Array.prototype.slice.call(arguments,1),\n\t\tlisteners = this.eventListeners[type];\n\tif(listeners) {\n\t\tfor(var p=0; p<listeners.length; p++) {\n\t\t\tvar listener = listeners[p];\n\t\t\tlistener.apply(listener,args);\n\t\t}\n\t}\n};\n\n/*\nCauses a tiddler to be marked as changed, incrementing the change count, and triggers event handlers.\nThis method should be called after the changes it describes have been made to the wiki.tiddlers[] array.\n\ttitle: Title of tiddler\n\tisDeleted: defaults to false (meaning the tiddler has been created or modified),\n\t\ttrue if the tiddler has been deleted\n*/\nexports.enqueueTiddlerEvent = function(title,isDeleted) {\n\t// Record the touch in the list of changed tiddlers\n\tthis.changedTiddlers = this.changedTiddlers || Object.create(null);\n\tthis.changedTiddlers[title] = this.changedTiddlers[title] || Object.create(null);\n\tthis.changedTiddlers[title][isDeleted ? 
\"deleted\" : \"modified\"] = true;\n\t// Increment the change count\n\tthis.changeCount = this.changeCount || Object.create(null);\n\tif($tw.utils.hop(this.changeCount,title)) {\n\t\tthis.changeCount[title]++;\n\t} else {\n\t\tthis.changeCount[title] = 1;\n\t}\n\t// Trigger events\n\tthis.eventListeners = this.eventListeners || {};\n\tif(!this.eventsTriggered) {\n\t\tvar self = this;\n\t\t$tw.utils.nextTick(function() {\n\t\t\tvar changes = self.changedTiddlers;\n\t\t\tself.changedTiddlers = Object.create(null);\n\t\t\tself.eventsTriggered = false;\n\t\t\tif($tw.utils.count(changes) > 0) {\n\t\t\t\tself.dispatchEvent(\"change\",changes);\n\t\t\t}\n\t\t});\n\t\tthis.eventsTriggered = true;\n\t}\n};\n\nexports.getSizeOfTiddlerEventQueue = function() {\n\treturn $tw.utils.count(this.changedTiddlers);\n};\n\nexports.clearTiddlerEventQueue = function() {\n\tthis.changedTiddlers = Object.create(null);\n\tthis.changeCount = Object.create(null);\n};\n\nexports.getChangeCount = function(title) {\n\tthis.changeCount = this.changeCount || Object.create(null);\n\tif($tw.utils.hop(this.changeCount,title)) {\n\t\treturn this.changeCount[title];\n\t} else {\n\t\treturn 0;\n\t}\n};\n\n/*\nGenerate an unused title from the specified base\n*/\nexports.generateNewTitle = function(baseTitle,options) {\n\toptions = options || {};\n\tvar c = 0,\n\t\ttitle = baseTitle;\n\twhile(this.tiddlerExists(title) || this.isShadowTiddler(title) || this.findDraft(title)) {\n\t\ttitle = baseTitle + \n\t\t\t(options.prefix || \" \") + \n\t\t\t(++c);\n\t}\n\treturn title;\n};\n\nexports.isSystemTiddler = function(title) {\n\treturn title && title.indexOf(\"$:/\") === 0;\n};\n\nexports.isTemporaryTiddler = function(title) {\n\treturn title && title.indexOf(\"$:/temp/\") === 0;\n};\n\nexports.isImageTiddler = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\t\t\n\t\tvar contentTypeInfo = $tw.config.contentTypeInfo[tiddler.fields.type || \"text/vnd.tiddlywiki\"];\n\t\treturn !!contentTypeInfo && contentTypeInfo.flags.indexOf(\"image\") !== -1;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLike addTiddler() except it will silently reject any plugin tiddlers that are older than the currently loaded version. 
Returns true if the tiddler was imported\n*/\nexports.importTiddler = function(tiddler) {\n\tvar existingTiddler = this.getTiddler(tiddler.fields.title);\n\t// Check if we're dealing with a plugin\n\tif(tiddler && tiddler.hasField(\"plugin-type\") && tiddler.hasField(\"version\") && existingTiddler && existingTiddler.hasField(\"plugin-type\") && existingTiddler.hasField(\"version\")) {\n\t\t// Reject the incoming plugin if it is older\n\t\tif(!$tw.utils.checkVersions(tiddler.fields.version,existingTiddler.fields.version)) {\n\t\t\treturn false;\n\t\t}\n\t}\n\t// Fall through to adding the tiddler\n\tthis.addTiddler(tiddler);\n\treturn true;\n};\n\n/*\nReturn a hashmap of the fields that should be set when a tiddler is created\n*/\nexports.getCreationFields = function() {\n\tvar fields = {\n\t\t\tcreated: new Date()\n\t\t},\n\t\tcreator = this.getTiddlerText(USER_NAME_TITLE);\n\tif(creator) {\n\t\tfields.creator = creator;\n\t}\n\treturn fields;\n};\n\n/*\nReturn a hashmap of the fields that should be set when a tiddler is modified\n*/\nexports.getModificationFields = function() {\n\tvar fields = Object.create(null),\n\t\tmodifier = this.getTiddlerText(USER_NAME_TITLE);\n\tfields.modified = new Date();\n\tif(modifier) {\n\t\tfields.modifier = modifier;\n\t}\n\treturn fields;\n};\n\n/*\nReturn a sorted array of tiddler titles.  Options include:\nsortField: field to sort by\nexcludeTag: tag to exclude\nincludeSystem: whether to include system tiddlers (defaults to false)\n*/\nexports.getTiddlers = function(options) {\n\toptions = options || Object.create(null);\n\tvar self = this,\n\t\tsortField = options.sortField || \"title\",\n\t\ttiddlers = [], t, titles = [];\n\tthis.each(function(tiddler,title) {\n\t\tif(options.includeSystem || !self.isSystemTiddler(title)) {\n\t\t\tif(!options.excludeTag || !tiddler.hasTag(options.excludeTag)) {\n\t\t\t\ttiddlers.push(tiddler);\n\t\t\t}\n\t\t}\n\t});\n\ttiddlers.sort(function(a,b) {\n\t\tvar aa = a.fields[sortField].toLowerCase() || \"\",\n\t\t\tbb = b.fields[sortField].toLowerCase() || \"\";\n\t\tif(aa < bb) {\n\t\t\treturn -1;\n\t\t} else {\n\t\t\tif(aa > bb) {\n\t\t\t\treturn 1;\n\t\t\t} else {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t});\n\tfor(t=0; t<tiddlers.length; t++) {\n\t\ttitles.push(tiddlers[t].fields.title);\n\t}\n\treturn titles;\n};\n\nexports.countTiddlers = function(excludeTag) {\n\tvar tiddlers = this.getTiddlers({excludeTag: excludeTag});\n\treturn $tw.utils.count(tiddlers);\n};\n\n/*\nReturns a function iterator(callback) that iterates through the specified titles, and invokes the callback with callback(tiddler,title)\n*/\nexports.makeTiddlerIterator = function(titles) {\n\tvar self = this;\n\tif(!$tw.utils.isArray(titles)) {\n\t\ttitles = Object.keys(titles);\n\t} else {\n\t\ttitles = titles.slice(0);\n\t}\n\treturn function(callback) {\n\t\ttitles.forEach(function(title) {\n\t\t\tcallback(self.getTiddler(title),title);\n\t\t});\n\t};\n};\n\n/*\nSort an array of tiddler titles by a specified field\n\ttitles: array of titles (sorted in place)\n\tsortField: name of field to sort by\n\tisDescending: true if the sort should be descending\n\tisCaseSensitive: true if the sort should consider upper and lower case letters to be different\n*/\nexports.sortTiddlers = function(titles,sortField,isDescending,isCaseSensitive,isNumeric) {\n\tvar self = this;\n\ttitles.sort(function(a,b) {\n\t\tvar x,y,\n\t\t\tcompareNumbers = function(x,y) {\n\t\t\t\tvar result = \n\t\t\t\t\tisNaN(x) && !isNaN(y) ? (isDescending ? 
-1 : 1) :\n\t\t\t\t\t!isNaN(x) && isNaN(y) ? (isDescending ? 1 : -1) :\n\t\t\t\t\t                        (isDescending ? y - x :  x - y);\n\t\t\t\treturn result;\n\t\t\t};\n\t\tif(sortField !== \"title\") {\n\t\t\tvar tiddlerA = self.getTiddler(a),\n\t\t\t\ttiddlerB = self.getTiddler(b);\n\t\t\tif(tiddlerA) {\n\t\t\t\ta = tiddlerA.fields[sortField] || \"\";\n\t\t\t} else {\n\t\t\t\ta = \"\";\n\t\t\t}\n\t\t\tif(tiddlerB) {\n\t\t\t\tb = tiddlerB.fields[sortField] || \"\";\n\t\t\t} else {\n\t\t\t\tb = \"\";\n\t\t\t}\n\t\t}\n\t\tx = Number(a);\n\t\ty = Number(b);\n\t\tif(isNumeric && (!isNaN(x) || !isNaN(y))) {\n\t\t\treturn compareNumbers(x,y);\n\t\t} else if($tw.utils.isDate(a) && $tw.utils.isDate(b)) {\n\t\t\treturn isDescending ? b - a : a - b;\n\t\t} else {\n\t\t\ta = String(a);\n\t\t\tb = String(b);\n\t\t\tif(!isCaseSensitive) {\n\t\t\t\ta = a.toLowerCase();\n\t\t\t\tb = b.toLowerCase();\n\t\t\t}\n\t\t\treturn isDescending ? b.localeCompare(a) : a.localeCompare(b);\n\t\t}\n\t});\n};\n\n/*\nFor every tiddler invoke a callback(title,tiddler) with `this` set to the wiki object. Options include:\nsortField: field to sort by\nexcludeTag: tag to exclude\nincludeSystem: whether to include system tiddlers (defaults to false)\n*/\nexports.forEachTiddler = function(/* [options,]callback */) {\n\tvar arg = 0,\n\t\toptions = arguments.length >= 2 ? arguments[arg++] : {},\n\t\tcallback = arguments[arg++],\n\t\ttitles = this.getTiddlers(options),\n\t\tt, tiddler;\n\tfor(t=0; t<titles.length; t++) {\n\t\ttiddler = this.getTiddler(titles[t]);\n\t\tif(tiddler) {\n\t\t\tcallback.call(this,tiddler.fields.title,tiddler);\n\t\t}\n\t}\n};\n\n/*\nReturn an array of tiddler titles that are directly linked from the specified tiddler\n*/\nexports.getTiddlerLinks = function(title) {\n\tvar self = this;\n\t// We'll cache the links so they only get computed if the tiddler changes\n\treturn this.getCacheForTiddler(title,\"links\",function() {\n\t\t// Parse the tiddler\n\t\tvar parser = self.parseTiddler(title);\n\t\t// Count up the links\n\t\tvar links = [],\n\t\t\tcheckParseTree = function(parseTree) {\n\t\t\t\tfor(var t=0; t<parseTree.length; t++) {\n\t\t\t\t\tvar parseTreeNode = parseTree[t];\n\t\t\t\t\tif(parseTreeNode.type === \"link\" && parseTreeNode.attributes.to && parseTreeNode.attributes.to.type === \"string\") {\n\t\t\t\t\t\tvar value = parseTreeNode.attributes.to.value;\n\t\t\t\t\t\tif(links.indexOf(value) === -1) {\n\t\t\t\t\t\t\tlinks.push(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif(parseTreeNode.children) {\n\t\t\t\t\t\tcheckParseTree(parseTreeNode.children);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\tif(parser) {\n\t\t\tcheckParseTree(parser.tree);\n\t\t}\n\t\treturn links;\n\t});\n};\n\n/*\nReturn an array of tiddler titles that link to the specified tiddler\n*/\nexports.getTiddlerBacklinks = function(targetTitle) {\n\tvar self = this,\n\t\tbacklinks = [];\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\tif(links.indexOf(targetTitle) !== -1) {\n\t\t\tbacklinks.push(title);\n\t\t}\n\t});\n\treturn backlinks;\n};\n\n/*\nReturn a hashmap of tiddler titles that are referenced but not defined. 
Each value is the number of times the missing tiddler is referenced\n*/\nexports.getMissingTitles = function() {\n\tvar self = this,\n\t\tmissing = [];\n// We should cache the missing tiddler list, even if we recreate it every time any tiddler is modified\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\t$tw.utils.each(links,function(link) {\n\t\t\tif((!self.tiddlerExists(link) && !self.isShadowTiddler(link)) && missing.indexOf(link) === -1) {\n\t\t\t\tmissing.push(link);\n\t\t\t}\n\t\t});\n\t});\n\treturn missing;\n};\n\nexports.getOrphanTitles = function() {\n\tvar self = this,\n\t\torphans = this.getTiddlers();\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\t$tw.utils.each(links,function(link) {\n\t\t\tvar p = orphans.indexOf(link);\n\t\t\tif(p !== -1) {\n\t\t\t\torphans.splice(p,1);\n\t\t\t}\n\t\t});\n\t});\n\treturn orphans; // Todo\n};\n\n/*\nRetrieves a list of the tiddler titles that are tagged with a given tag\n*/\nexports.getTiddlersWithTag = function(tag) {\n\tvar self = this;\n\treturn this.getGlobalCache(\"taglist-\" + tag,function() {\n\t\tvar tagmap = self.getTagMap();\n\t\treturn self.sortByList(tagmap[tag],tag);\n\t});\n};\n\n/*\nGet a hashmap by tag of arrays of tiddler titles\n*/\nexports.getTagMap = function() {\n\tvar self = this;\n\treturn this.getGlobalCache(\"tagmap\",function() {\n\t\tvar tags = Object.create(null),\n\t\t\tstoreTags = function(tagArray,title) {\n\t\t\t\tif(tagArray) {\n\t\t\t\t\tfor(var index=0; index<tagArray.length; index++) {\n\t\t\t\t\t\tvar tag = tagArray[index];\n\t\t\t\t\t\tif($tw.utils.hop(tags,tag)) {\n\t\t\t\t\t\t\ttags[tag].push(title);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttags[tag] = [title];\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\ttitle, tiddler;\n\t\t// Collect up all the tags\n\t\tself.eachShadow(function(tiddler,title) {\n\t\t\tif(!self.tiddlerExists(title)) {\n\t\t\t\ttiddler = self.getTiddler(title);\n\t\t\t\tstoreTags(tiddler.fields.tags,title);\n\t\t\t}\n\t\t});\n\t\tself.each(function(tiddler,title) {\n\t\t\tstoreTags(tiddler.fields.tags,title);\n\t\t});\n\t\treturn tags;\n\t});\n};\n\n/*\nLookup a given tiddler and return a list of all the tiddlers that include it in the specified list field\n*/\nexports.findListingsOfTiddler = function(targetTitle,fieldName) {\n\tfieldName = fieldName || \"list\";\n\tvar titles = [];\n\tthis.each(function(tiddler,title) {\n\t\tvar list = $tw.utils.parseStringArray(tiddler.fields[fieldName]);\n\t\tif(list && list.indexOf(targetTitle) !== -1) {\n\t\t\ttitles.push(title);\n\t\t}\n\t});\n\treturn titles;\n};\n\n/*\nSorts an array of tiddler titles according to an ordered list\n*/\nexports.sortByList = function(array,listTitle) {\n\tvar list = this.getTiddlerList(listTitle);\n\tif(!array || array.length === 0) {\n\t\treturn [];\n\t} else {\n\t\tvar titles = [], t, title;\n\t\t// First place any entries that are present in the list\n\t\tfor(t=0; t<list.length; t++) {\n\t\t\ttitle = list[t];\n\t\t\tif(array.indexOf(title) !== -1) {\n\t\t\t\ttitles.push(title);\n\t\t\t}\n\t\t}\n\t\t// Then place any remaining entries\n\t\tfor(t=0; t<array.length; t++) {\n\t\t\ttitle = array[t];\n\t\t\tif(list.indexOf(title) === -1) {\n\t\t\t\ttitles.push(title);\n\t\t\t}\n\t\t}\n\t\t// Finally obey the list-before and list-after fields of each tiddler in turn\n\t\tvar sortedTitles = titles.slice(0);\n\t\tfor(t=0; t<sortedTitles.length; t++) {\n\t\t\ttitle = sortedTitles[t];\n\t\t\tvar currPos = 
titles.indexOf(title),\n\t\t\t\tnewPos = -1,\n\t\t\t\ttiddler = this.getTiddler(title);\n\t\t\tif(tiddler) {\n\t\t\t\tvar beforeTitle = tiddler.fields[\"list-before\"],\n\t\t\t\t\tafterTitle = tiddler.fields[\"list-after\"];\n\t\t\t\tif(beforeTitle === \"\") {\n\t\t\t\t\tnewPos = 0;\n\t\t\t\t} else if(beforeTitle) {\n\t\t\t\t\tnewPos = titles.indexOf(beforeTitle);\n\t\t\t\t} else if(afterTitle) {\n\t\t\t\t\tnewPos = titles.indexOf(afterTitle);\n\t\t\t\t\tif(newPos >= 0) {\n\t\t\t\t\t\t++newPos;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif(newPos === -1) {\n\t\t\t\t\tnewPos = currPos;\n\t\t\t\t}\n\t\t\t\tif(newPos !== currPos) {\n\t\t\t\t\ttitles.splice(currPos,1);\n\t\t\t\t\tif(newPos >= currPos) {\n\t\t\t\t\t\tnewPos--;\n\t\t\t\t\t}\n\t\t\t\t\ttitles.splice(newPos,0,title);\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\treturn titles;\n\t}\n};\n\nexports.getSubTiddler = function(title,subTiddlerTitle) {\n\tvar bundleInfo = this.getPluginInfo(title) || this.getTiddlerDataCached(title);\n\tif(bundleInfo && bundleInfo.tiddlers) {\n\t\tvar subTiddler = bundleInfo.tiddlers[subTiddlerTitle];\n\t\tif(subTiddler) {\n\t\t\treturn new $tw.Tiddler(subTiddler);\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRetrieve a tiddler as a JSON string of the fields\n*/\nexports.getTiddlerAsJson = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\n\t\tvar fields = Object.create(null);\n\t\t$tw.utils.each(tiddler.fields,function(value,name) {\n\t\t\tfields[name] = tiddler.getFieldString(name);\n\t\t});\n\t\treturn JSON.stringify(fields);\n\t} else {\n\t\treturn JSON.stringify({title: title});\n\t}\n};\n\n/*\nGet the content of a tiddler as a JavaScript object. How this is done depends on the type of the tiddler:\n\napplication/json: the tiddler JSON is parsed into an object\napplication/x-tiddler-dictionary: the tiddler is parsed as sequence of name:value pairs\n\nOther types currently just return null.\n\ntitleOrTiddler: string tiddler title or a tiddler object\ndefaultData: default data to be returned if the tiddler is missing or doesn't contain data\n\nNote that the same value is returned for repeated calls for the same tiddler data. The value is frozen to prevent modification; otherwise modifications would be visible to all callers\n*/\nexports.getTiddlerDataCached = function(titleOrTiddler,defaultData) {\n\tvar self = this,\n\t\ttiddler = titleOrTiddler;\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\ttiddler = this.getTiddler(tiddler);\t\n\t}\n\tif(tiddler) {\n\t\treturn this.getCacheForTiddler(tiddler.fields.title,\"data\",function() {\n\t\t\t// Return the frozen value\n\t\t\tvar value = self.getTiddlerData(tiddler.fields.title,defaultData);\n\t\t\t$tw.utils.deepFreeze(value);\n\t\t\treturn value;\n\t\t});\n\t} else {\n\t\treturn defaultData;\n\t}\n};\n\n/*\nAlternative, uncached version of getTiddlerDataCached(). 
The return value can be mutated freely and reused\n*/\nexports.getTiddlerData = function(titleOrTiddler,defaultData) {\n\tvar tiddler = titleOrTiddler,\n\t\tdata;\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\ttiddler = this.getTiddler(tiddler);\t\n\t}\n\tif(tiddler && tiddler.fields.text) {\n\t\tswitch(tiddler.fields.type) {\n\t\t\tcase \"application/json\":\n\t\t\t\t// JSON tiddler\n\t\t\t\ttry {\n\t\t\t\t\tdata = JSON.parse(tiddler.fields.text);\n\t\t\t\t} catch(ex) {\n\t\t\t\t\treturn defaultData;\n\t\t\t\t}\n\t\t\t\treturn data;\n\t\t\tcase \"application/x-tiddler-dictionary\":\n\t\t\t\treturn $tw.utils.parseFields(tiddler.fields.text);\n\t\t}\n\t}\n\treturn defaultData;\n};\n\n/*\nExtract an indexed field from within a data tiddler\n*/\nexports.extractTiddlerDataItem = function(titleOrTiddler,index,defaultText) {\n\tvar data = this.getTiddlerData(titleOrTiddler,Object.create(null)),\n\t\ttext;\n\tif(data && $tw.utils.hop(data,index)) {\n\t\ttext = data[index];\n\t}\n\tif(typeof text === \"string\" || typeof text === \"number\") {\n\t\treturn text.toString();\n\t} else {\n\t\treturn defaultText;\n\t}\n};\n\n/*\nSet a tiddlers content to a JavaScript object. Currently this is done by setting the tiddler's type to \"application/json\" and setting the text to the JSON text of the data.\ntitle: title of tiddler\ndata: object that can be serialised to JSON\nfields: optional hashmap of additional tiddler fields to be set\n*/\nexports.setTiddlerData = function(title,data,fields) {\n\tvar existingTiddler = this.getTiddler(title),\n\t\tnewFields = {\n\t\t\ttitle: title\n\t};\n\tif(existingTiddler && existingTiddler.fields.type === \"application/x-tiddler-dictionary\") {\n\t\tnewFields.text = $tw.utils.makeTiddlerDictionary(data);\n\t} else {\n\t\tnewFields.type = \"application/json\";\n\t\tnewFields.text = JSON.stringify(data,null,$tw.config.preferences.jsonSpaces);\n\t}\n\tthis.addTiddler(new $tw.Tiddler(this.getCreationFields(),existingTiddler,fields,newFields,this.getModificationFields()));\n};\n\n/*\nReturn the content of a tiddler as an array containing each line\n*/\nexports.getTiddlerList = function(title,field,index) {\n\tif(index) {\n\t\treturn $tw.utils.parseStringArray(this.extractTiddlerDataItem(title,index,\"\"));\n\t}\n\tfield = field || \"list\";\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\n\t\treturn ($tw.utils.parseStringArray(tiddler.fields[field]) || []).slice(0);\n\t}\n\treturn [];\n};\n\n// Return a named global cache object. Global cache objects are cleared whenever a tiddler change occurs\nexports.getGlobalCache = function(cacheName,initializer) {\n\tthis.globalCache = this.globalCache || Object.create(null);\n\tif($tw.utils.hop(this.globalCache,cacheName)) {\n\t\treturn this.globalCache[cacheName];\n\t} else {\n\t\tthis.globalCache[cacheName] = initializer();\n\t\treturn this.globalCache[cacheName];\n\t}\n};\n\nexports.clearGlobalCache = function() {\n\tthis.globalCache = Object.create(null);\n};\n\n// Return the named cache object for a tiddler. 
If the cache doesn't exist then the initializer function is invoked to create it\nexports.getCacheForTiddler = function(title,cacheName,initializer) {\n\tthis.caches = this.caches || Object.create(null);\n\tvar caches = this.caches[title];\n\tif(caches && caches[cacheName]) {\n\t\treturn caches[cacheName];\n\t} else {\n\t\tif(!caches) {\n\t\t\tcaches = Object.create(null);\n\t\t\tthis.caches[title] = caches;\n\t\t}\n\t\tcaches[cacheName] = initializer();\n\t\treturn caches[cacheName];\n\t}\n};\n\n// Clear all caches associated with a particular tiddler, or, if the title is null, clear all the caches for all the tiddlers\nexports.clearCache = function(title) {\n\tif(title) {\n\t\tthis.caches = this.caches || Object.create(null);\n\t\tif($tw.utils.hop(this.caches,title)) {\n\t\t\tdelete this.caches[title];\n\t\t}\n\t} else {\n\t\tthis.caches = Object.create(null);\n\t}\n};\n\nexports.initParsers = function(moduleType) {\n\t// Install the parser modules\n\t$tw.Wiki.parsers = {};\n\tvar self = this;\n\t$tw.modules.forEachModuleOfType(\"parser\",function(title,module) {\n\t\tfor(var f in module) {\n\t\t\tif($tw.utils.hop(module,f)) {\n\t\t\t\t$tw.Wiki.parsers[f] = module[f]; // Store the parser class\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nParse a block of text of a specified MIME type\n\ttype: content type of text to be parsed\n\ttext: text\n\toptions: see below\nOptions include:\n\tparseAsInline: if true, the text of the tiddler will be parsed as an inline run\n\t_canonical_uri: optional string of the canonical URI of this content\n*/\nexports.parseText = function(type,text,options) {\n\ttext = text || \"\";\n\toptions = options || {};\n\t// Select a parser\n\tvar Parser = $tw.Wiki.parsers[type];\n\tif(!Parser && $tw.utils.getFileExtensionInfo(type)) {\n\t\tParser = $tw.Wiki.parsers[$tw.utils.getFileExtensionInfo(type).type];\n\t}\n\tif(!Parser) {\n\t\tParser = $tw.Wiki.parsers[options.defaultType || \"text/vnd.tiddlywiki\"];\n\t}\n\tif(!Parser) {\n\t\treturn null;\n\t}\n\t// Return the parser instance\n\treturn new Parser(type,text,{\n\t\tparseAsInline: options.parseAsInline,\n\t\twiki: this,\n\t\t_canonical_uri: options._canonical_uri\n\t});\n};\n\n/*\nParse a tiddler according to its MIME type\n*/\nexports.parseTiddler = function(title,options) {\n\toptions = $tw.utils.extend({},options);\n\tvar cacheType = options.parseAsInline ? \"inlineParseTree\" : \"blockParseTree\",\n\t\ttiddler = this.getTiddler(title),\n\t\tself = this;\n\treturn tiddler ? 
this.getCacheForTiddler(title,cacheType,function() {\n\t\t\tif(tiddler.hasField(\"_canonical_uri\")) {\n\t\t\t\toptions._canonical_uri = tiddler.fields._canonical_uri;\n\t\t\t}\n\t\t\treturn self.parseText(tiddler.fields.type,tiddler.fields.text,options);\n\t\t}) : null;\n};\n\nexports.parseTextReference = function(title,field,index,options) {\n\tvar tiddler,text;\n\tif(options.subTiddler) {\n\t\ttiddler = this.getSubTiddler(title,options.subTiddler);\n\t} else {\n\t\ttiddler = this.getTiddler(title);\n\t\tif(field === \"text\" || (!field && !index)) {\n\t\t\tthis.getTiddlerText(title); // Force the tiddler to be lazily loaded\n\t\t\treturn this.parseTiddler(title,options);\n\t\t}\n\t}\n\tif(field === \"text\" || (!field && !index)) {\n\t\tif(tiddler && tiddler.fields) {\n\t\t\treturn this.parseText(tiddler.fields.type || \"text/vnd.tiddlywiki\",tiddler.fields.text,options);\t\t\t\n\t\t} else {\n\t\t\treturn null;\n\t\t}\n\t} else if(field) {\n\t\tif(field === \"title\") {\n\t\t\ttext = title;\n\t\t} else {\n\t\t\tif(!tiddler || !tiddler.hasField(field)) {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t\ttext = tiddler.fields[field];\n\t\t}\n\t\treturn this.parseText(\"text/vnd.tiddlywiki\",text.toString(),options);\n\t} else if(index) {\n\t\tthis.getTiddlerText(title); // Force the tiddler to be lazily loaded\n\t\ttext = this.extractTiddlerDataItem(tiddler,index,undefined);\n\t\tif(text === undefined) {\n\t\t\treturn null;\n\t\t}\n\t\treturn this.parseText(\"text/vnd.tiddlywiki\",text,options);\n\t}\n};\n\n/*\nMake a widget tree for a parse tree\nparser: parser object\noptions: see below\nOptions include:\ndocument: optional document to use\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.makeWidget = function(parser,options) {\n\toptions = options || {};\n\tvar widgetNode = {\n\t\t\ttype: \"widget\",\n\t\t\tchildren: []\n\t\t},\n\t\tcurrWidgetNode = widgetNode;\n\t// Create set variable widgets for each variable\n\t$tw.utils.each(options.variables,function(value,name) {\n\t\tvar setVariableWidget = {\n\t\t\ttype: \"set\",\n\t\t\tattributes: {\n\t\t\t\tname: {type: \"string\", value: name},\n\t\t\t\tvalue: {type: \"string\", value: value}\n\t\t\t},\n\t\t\tchildren: []\n\t\t};\n\t\tcurrWidgetNode.children = [setVariableWidget];\n\t\tcurrWidgetNode = setVariableWidget;\n\t});\n\t// Add in the supplied parse tree nodes\n\tcurrWidgetNode.children = parser ? 
parser.tree : [];\n\t// Create the widget\n\treturn new widget.widget(widgetNode,{\n\t\twiki: this,\n\t\tdocument: options.document || $tw.fakeDocument,\n\t\tparentWidget: options.parentWidget\n\t});\n};\n\n/*\nMake a widget tree for transclusion\ntitle: target tiddler title\noptions: as for wiki.makeWidget() plus:\noptions.field: optional field to transclude (defaults to \"text\")\noptions.mode: transclusion mode \"inline\" or \"block\"\noptions.children: optional array of children for the transclude widget\n*/\nexports.makeTranscludeWidget = function(title,options) {\n\toptions = options || {};\n\tvar parseTree = {tree: [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"div\",\n\t\t\tchildren: [{\n\t\t\t\ttype: \"transclude\",\n\t\t\t\tattributes: {\n\t\t\t\t\ttiddler: {\n\t\t\t\t\t\tname: \"tiddler\",\n\t\t\t\t\t\ttype: \"string\",\n\t\t\t\t\t\tvalue: title}},\n\t\t\t\tisBlock: !options.parseAsInline}]}\n\t]};\n\tif(options.field) {\n\t\tparseTree.tree[0].children[0].attributes.field = {type: \"string\", value: options.field};\n\t}\n\tif(options.mode) {\n\t\tparseTree.tree[0].children[0].attributes.mode = {type: \"string\", value: options.mode};\n\t}\n\tif(options.children) {\n\t\tparseTree.tree[0].children[0].children = options.children;\n\t}\n\treturn $tw.wiki.makeWidget(parseTree,options);\n};\n\n/*\nParse text in a specified format and render it into another format\n\toutputType: content type for the output\n\ttextType: content type of the input text\n\ttext: input text\n\toptions: see below\nOptions include:\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.renderText = function(outputType,textType,text,options) {\n\toptions = options || {};\n\tvar parser = this.parseText(textType,text,options),\n\t\twidgetNode = this.makeWidget(parser,options);\n\tvar container = $tw.fakeDocument.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn outputType === \"text/html\" ? container.innerHTML : container.textContent;\n};\n\n/*\nParse text from a tiddler and render it into another format\n\toutputType: content type for the output\n\ttitle: title of the tiddler to be rendered\n\toptions: see below\nOptions include:\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.renderTiddler = function(outputType,title,options) {\n\toptions = options || {};\n\tvar parser = this.parseTiddler(title,options),\n\t\twidgetNode = this.makeWidget(parser,options);\n\tvar container = $tw.fakeDocument.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn outputType === \"text/html\" ? container.innerHTML : (outputType === \"text/plain-formatted\" ? 
container.formattedTextContent : container.textContent);\n};\n\n/*\nReturn an array of tiddler titles that match a search string\n\ttext: The text string to search for\n\toptions: see below\nOptions available:\n\tsource: an iterator function for the source tiddlers, called source(iterator), where iterator is called as iterator(tiddler,title)\n\texclude: An array of tiddler titles to exclude from the search\n\tinvert: If true returns tiddlers that do not contain the specified string\n\tcaseSensitive: If true forces a case sensitive search\n\tliteral: If true, searches for literal string, rather than separate search terms\n\tfield: If specified, restricts the search to the specified field\n*/\nexports.search = function(text,options) {\n\toptions = options || {};\n\tvar self = this,\n\t\tt,\n\t\tinvert = !!options.invert;\n\t// Convert the search string into a regexp for each term\n\tvar terms, searchTermsRegExps,\n\t\tflags = options.caseSensitive ? \"\" : \"i\";\n\tif(options.literal) {\n\t\tif(text.length === 0) {\n\t\t\tsearchTermsRegExps = null;\n\t\t} else {\n\t\t\tsearchTermsRegExps = [new RegExp(\"(\" + $tw.utils.escapeRegExp(text) + \")\",flags)];\n\t\t}\n\t} else {\n\t\tterms = text.split(/ +/);\n\t\tif(terms.length === 1 && terms[0] === \"\") {\n\t\t\tsearchTermsRegExps = null;\n\t\t} else {\n\t\t\tsearchTermsRegExps = [];\n\t\t\tfor(t=0; t<terms.length; t++) {\n\t\t\t\tsearchTermsRegExps.push(new RegExp(\"(\" + $tw.utils.escapeRegExp(terms[t]) + \")\",flags));\n\t\t\t}\n\t\t}\n\t}\n\t// Function to check a given tiddler for the search term\n\tvar searchTiddler = function(title) {\n\t\tif(!searchTermsRegExps) {\n\t\t\treturn true;\n\t\t}\n\t\tvar tiddler = self.getTiddler(title);\n\t\tif(!tiddler) {\n\t\t\ttiddler = new $tw.Tiddler({title: title, text: \"\", type: \"text/vnd.tiddlywiki\"});\n\t\t}\n\t\tvar contentTypeInfo = $tw.config.contentTypeInfo[tiddler.fields.type] || $tw.config.contentTypeInfo[\"text/vnd.tiddlywiki\"],\n\t\t\tmatch;\n\t\tfor(var t=0; t<searchTermsRegExps.length; t++) {\n\t\t\tmatch = false;\n\t\t\tif(options.field) {\n\t\t\t\tmatch = searchTermsRegExps[t].test(tiddler.getFieldString(options.field));\n\t\t\t} else {\n\t\t\t\t// Search title, tags and body\n\t\t\t\tif(contentTypeInfo.encoding === \"utf8\") {\n\t\t\t\t\tmatch = match || searchTermsRegExps[t].test(tiddler.fields.text);\n\t\t\t\t}\n\t\t\t\tvar tags = tiddler.fields.tags ? tiddler.fields.tags.join(\"\\0\") : \"\";\n\t\t\t\tmatch = match || searchTermsRegExps[t].test(tags) || searchTermsRegExps[t].test(tiddler.fields.title);\n\t\t\t}\n\t\t\tif(!match) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t};\n\t// Loop through all the tiddlers doing the search\n\tvar results = [],\n\t\tsource = options.source || this.each;\n\tsource(function(tiddler,title) {\n\t\tif(searchTiddler(title) !== options.invert) {\n\t\t\tresults.push(title);\n\t\t}\n\t});\n\t// Remove any of the results we have to exclude\n\tif(options.exclude) {\n\t\tfor(t=0; t<options.exclude.length; t++) {\n\t\t\tvar p = results.indexOf(options.exclude[t]);\n\t\t\tif(p !== -1) {\n\t\t\t\tresults.splice(p,1);\n\t\t\t}\n\t\t}\n\t}\n\treturn results;\n};\n\n/*\nTrigger a load for a tiddler if it is skinny. 
Returns the text, or undefined if the tiddler is missing, null if the tiddler is being lazily loaded.\n*/\nexports.getTiddlerText = function(title,defaultText) {\n\tvar tiddler = this.getTiddler(title);\n\t// Return undefined if the tiddler isn't found\n\tif(!tiddler) {\n\t\treturn defaultText;\n\t}\n\tif(tiddler.fields.text !== undefined) {\n\t\t// Just return the text if we've got it\n\t\treturn tiddler.fields.text;\n\t} else {\n\t\t// Tell any listeners about the need to lazily load this tiddler\n\t\tthis.dispatchEvent(\"lazyLoad\",title);\n\t\t// Indicate that the text is being loaded\n\t\treturn null;\n\t}\n};\n\n/*\nRead an array of browser File objects, invoking callback(tiddlerFieldsArray) once they're all read\n*/\nexports.readFiles = function(files,callback) {\n\tvar result = [],\n\t\toutstanding = files.length;\n\tfor(var f=0; f<files.length; f++) {\n\t\tthis.readFile(files[f],function(tiddlerFieldsArray) {\n\t\t\tresult.push.apply(result,tiddlerFieldsArray);\n\t\t\tif(--outstanding === 0) {\n\t\t\t\tcallback(result);\n\t\t\t}\n\t\t});\n\t}\n\treturn files.length;\n};\n\n/*\nRead a browser File object, invoking callback(tiddlerFieldsArray) with an array of tiddler fields objects\n*/\nexports.readFile = function(file,callback) {\n\t// Get the type, falling back to the filename extension\n\tvar self = this,\n\t\ttype = file.type;\n\tif(type === \"\" || !type) {\n\t\tvar dotPos = file.name.lastIndexOf(\".\");\n\t\tif(dotPos !== -1) {\n\t\t\tvar fileExtensionInfo = $tw.utils.getFileExtensionInfo(file.name.substr(dotPos));\n\t\t\tif(fileExtensionInfo) {\n\t\t\t\ttype = fileExtensionInfo.type;\n\t\t\t}\n\t\t}\n\t}\n\t// Figure out if we're reading a binary file\n\tvar contentTypeInfo = $tw.config.contentTypeInfo[type],\n\t\tisBinary = contentTypeInfo ? 
contentTypeInfo.encoding === \"base64\" : false;\n\t// Log some debugging information\n\tif($tw.log.IMPORT) {\n\t\tconsole.log(\"Importing file '\" + file.name + \"', type: '\" + type + \"', isBinary: \" + isBinary);\n\t}\n\t// Create the FileReader\n\tvar reader = new FileReader();\n\t// Onload\n\treader.onload = function(event) {\n\t\t// Deserialise the file contents\n\t\tvar text = event.target.result,\n\t\t\ttiddlerFields = {title: file.name || \"Untitled\", type: type};\n\t\t// Are we binary?\n\t\tif(isBinary) {\n\t\t\t// The base64 section starts after the first comma in the data URI\n\t\t\tvar commaPos = text.indexOf(\",\");\n\t\t\tif(commaPos !== -1) {\n\t\t\t\ttiddlerFields.text = text.substr(commaPos+1);\n\t\t\t\tcallback([tiddlerFields]);\n\t\t\t}\n\t\t} else {\n\t\t\t// Check whether this is an encrypted TiddlyWiki file\n\t\t\tvar encryptedJson = $tw.utils.extractEncryptedStoreArea(text);\n\t\t\tif(encryptedJson) {\n\t\t\t\t// If so, attempt to decrypt it with the current password\n\t\t\t\t$tw.utils.decryptStoreAreaInteractive(encryptedJson,function(tiddlers) {\n\t\t\t\t\tcallback(tiddlers);\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\t// Otherwise, just try to deserialise any tiddlers in the file\n\t\t\t\tcallback(self.deserializeTiddlers(type,text,tiddlerFields));\n\t\t\t}\n\t\t}\n\t};\n\t// Kick off the read\n\tif(isBinary) {\n\t\treader.readAsDataURL(file);\n\t} else {\n\t\treader.readAsText(file);\n\t}\n};\n\n/*\nFind any existing draft of a specified tiddler\n*/\nexports.findDraft = function(targetTitle) {\n\tvar draftTitle = undefined;\n\tthis.forEachTiddler({includeSystem: true},function(title,tiddler) {\n\t\tif(tiddler.fields[\"draft.title\"] && tiddler.fields[\"draft.of\"] === targetTitle) {\n\t\t\tdraftTitle = title;\n\t\t}\n\t});\n\treturn draftTitle;\n}\n\n/*\nCheck whether the specified draft tiddler has been modified.\nIf the original tiddler doesn't exist, create  a vanilla tiddler variable,\nto check if additional fields have been added.\n*/\nexports.isDraftModified = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(!tiddler.isDraft()) {\n\t\treturn false;\n\t}\n\tvar ignoredFields = [\"created\", \"modified\", \"title\", \"draft.title\", \"draft.of\"],\n\t\torigTiddler = this.getTiddler(tiddler.fields[\"draft.of\"]) || new $tw.Tiddler({text:\"\", tags:[]}),\n\t\ttitleModified = tiddler.fields[\"draft.title\"] !== tiddler.fields[\"draft.of\"];\n\treturn titleModified || !tiddler.isEqual(origTiddler,ignoredFields);\n};\n\n/*\nAdd a new record to the top of the history stack\ntitle: a title string or an array of title strings\nfromPageRect: page coordinates of the origin of the navigation\nhistoryTitle: title of history tiddler (defaults to $:/HistoryList)\n*/\nexports.addToHistory = function(title,fromPageRect,historyTitle) {\n\tvar story = new $tw.Story({wiki: this, historyTitle: historyTitle});\n\tstory.addToHistory(title,fromPageRect);\n};\n\n/*\nInvoke the available upgrader modules\ntitles: array of tiddler titles to be processed\ntiddlers: hashmap by title of tiddler fields of pending import tiddlers. These can be modified by the upgraders. An entry with no fields indicates a tiddler that was pending import has been suppressed. 
When entries are added to the pending import the tiddlers hashmap may have entries that are not present in the titles array\nReturns a hashmap of messages keyed by tiddler title.\n*/\nexports.invokeUpgraders = function(titles,tiddlers) {\n\t// Collect up the available upgrader modules\n\tvar self = this;\n\tif(!this.upgraderModules) {\n\t\tthis.upgraderModules = [];\n\t\t$tw.modules.forEachModuleOfType(\"upgrader\",function(title,module) {\n\t\t\tif(module.upgrade) {\n\t\t\t\tself.upgraderModules.push(module);\n\t\t\t}\n\t\t});\n\t}\n\t// Invoke each upgrader in turn\n\tvar messages = {};\n\tfor(var t=0; t<this.upgraderModules.length; t++) {\n\t\tvar upgrader = this.upgraderModules[t],\n\t\t\tupgraderMessages = upgrader.upgrade(this,titles,tiddlers);\n\t\t$tw.utils.extend(messages,upgraderMessages);\n\t}\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/wiki.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/palettes/Blanca": {
            "title": "$:/palettes/Blanca",
            "name": "Blanca",
            "description": "A clean white palette to let you focus",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #66cccc\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #ffffff\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #7897f3\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ccc\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #ffffff\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #7897f3\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #eeeeee\ntab-border-selected: #cccccc\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ffeedd\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: #eee\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #ff9900\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Blue": {
            "title": "$:/palettes/Blue",
            "name": "Blue",
            "description": "A blue theme",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #fff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour foreground>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333353\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #ddddff\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ffffff\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: <<colour page-background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #5959c0\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: #ccccdd\ntab-border-selected: #ccccdd\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #eeeeff\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #666666\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #ffffff\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #ffffff\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #5959c0\ntoolbar-new-button: #5eb95e\ntoolbar-options-button: rgb(128, 88, 165)\ntoolbar-save-button: #0e90d2\ntoolbar-info-button: #0e90d2\ntoolbar-edit-button: rgb(243, 123, 29)\ntoolbar-close-button: #dd514c\ntoolbar-delete-button: #dd514c\ntoolbar-cancel-button: rgb(243, 123, 
29)\ntoolbar-done-button: #5eb95e\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Muted": {
            "title": "$:/palettes/Muted",
            "name": "Muted",
            "description": "Bright tiddlers on a muted background",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #bbb\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #6f6f70\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #29a6ee\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #c2c1c2\nsidebar-foreground-shadow: rgba(255,255,255,0)\nsidebar-foreground: #d3d2d4\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #6f6f70\nsidebar-tab-background: #666667\nsidebar-tab-border-selected: #999\nsidebar-tab-border: #515151\nsidebar-tab-divider: #999\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: #999\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #d1d0d2\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #d5ad34\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/ContrastLight": {
            "title": "$:/palettes/ContrastLight",
            "name": "Contrast (Light)",
            "description": "High contrast and unambiguous (light version)",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #f00\nalert-border: <<colour background>>\nalert-highlight: <<colour foreground>>\nalert-muted-foreground: #800\nbackground: #fff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: <<colour background>>\nbutton-foreground: <<colour foreground>>\nbutton-border: <<colour foreground>>\ncode-background: <<colour background>>\ncode-border: <<colour foreground>>\ncode-foreground: <<colour foreground>>\ndirty-indicator: #f00\ndownload-background: #080\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: <<colour foreground>>\ndropdown-tab-background: <<colour foreground>>\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #00a\nexternal-link-foreground: #00e\nforeground: #000\nmessage-background: <<colour foreground>>\nmessage-border: <<colour background>>\nmessage-foreground: <<colour background>>\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: <<colour foreground>>\nmodal-footer-background: <<colour background>>\nmodal-footer-border: <<colour foreground>>\nmodal-header-border: <<colour foreground>>\nmuted-foreground: <<colour foreground>>\nnotification-background: <<colour background>>\nnotification-border: <<colour foreground>>\npage-background: <<colour background>>\npre-background: <<colour background>>\npre-border: <<colour foreground>>\nprimary: #00f\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: <<colour background>>\nsidebar-controls-foreground: <<colour foreground>>\nsidebar-foreground-shadow: rgba(0,0,0, 0)\nsidebar-foreground: <<colour foreground>>\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: <<colour foreground>>\nsidebar-tab-background-selected: <<colour background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: <<colour foreground>>\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: <<colour foreground>>\nsidebar-tiddler-link-foreground: <<colour primary>>\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: <<colour foreground>>\ntab-border-selected: <<colour foreground>>\ntab-border: <<colour foreground>>\ntab-divider: <<colour foreground>>\ntab-foreground-selected: <<colour foreground>>\ntab-foreground: <<colour background>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #000\ntag-foreground: #fff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour foreground>>\ntiddler-controls-foreground-hover: #ddd\ntiddler-controls-foreground-selected: #fdd\ntiddler-controls-foreground: <<colour foreground>>\ntiddler-editor-background: <<colour background>>\ntiddler-editor-border-image: <<colour foreground>>\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: <<colour background>>\ntiddler-editor-fields-odd: <<colour 
background>>\ntiddler-info-background: <<colour background>>\ntiddler-info-border: <<colour foreground>>\ntiddler-info-tab-background: <<colour background>>\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: <<colour foreground>>\ntiddler-title-foreground: <<colour foreground>>\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: <<colour foreground>>\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/ContrastDark": {
            "title": "$:/palettes/ContrastDark",
            "name": "Contrast (Dark)",
            "description": "High contrast and unambiguous (dark version)",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #f00\nalert-border: <<colour background>>\nalert-highlight: <<colour foreground>>\nalert-muted-foreground: #800\nbackground: #000\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: <<colour background>>\nbutton-foreground: <<colour foreground>>\nbutton-border: <<colour foreground>>\ncode-background: <<colour background>>\ncode-border: <<colour foreground>>\ncode-foreground: <<colour foreground>>\ndirty-indicator: #f00\ndownload-background: #080\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: <<colour foreground>>\ndropdown-tab-background: <<colour foreground>>\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #00a\nexternal-link-foreground: #00e\nforeground: #fff\nmessage-background: <<colour foreground>>\nmessage-border: <<colour background>>\nmessage-foreground: <<colour background>>\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: <<colour foreground>>\nmodal-footer-background: <<colour background>>\nmodal-footer-border: <<colour foreground>>\nmodal-header-border: <<colour foreground>>\nmuted-foreground: <<colour foreground>>\nnotification-background: <<colour background>>\nnotification-border: <<colour foreground>>\npage-background: <<colour background>>\npre-background: <<colour background>>\npre-border: <<colour foreground>>\nprimary: #00f\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: <<colour background>>\nsidebar-controls-foreground: <<colour foreground>>\nsidebar-foreground-shadow: rgba(0,0,0, 0)\nsidebar-foreground: <<colour foreground>>\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: <<colour foreground>>\nsidebar-tab-background-selected: <<colour background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: <<colour foreground>>\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: <<colour foreground>>\nsidebar-tiddler-link-foreground: <<colour primary>>\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: <<colour foreground>>\ntab-border-selected: <<colour foreground>>\ntab-border: <<colour foreground>>\ntab-divider: <<colour foreground>>\ntab-foreground-selected: <<colour foreground>>\ntab-foreground: <<colour background>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #fff\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour foreground>>\ntiddler-controls-foreground-hover: #ddd\ntiddler-controls-foreground-selected: #fdd\ntiddler-controls-foreground: <<colour foreground>>\ntiddler-editor-background: <<colour background>>\ntiddler-editor-border-image: <<colour foreground>>\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: <<colour background>>\ntiddler-editor-fields-odd: <<colour 
background>>\ntiddler-info-background: <<colour background>>\ntiddler-info-border: <<colour foreground>>\ntiddler-info-tab-background: <<colour background>>\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: <<colour foreground>>\ntiddler-title-foreground: <<colour foreground>>\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: <<colour foreground>>\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/DarkPhotos": {
            "created": "20150402111612188",
            "description": "Good with dark photo backgrounds",
            "modified": "20150402112344080",
            "name": "DarkPhotos",
            "tags": "$:/tags/Palette",
            "title": "$:/palettes/DarkPhotos",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: \nbutton-foreground: \nbutton-border: \ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #ddd\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #336438\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #ccf\nsidebar-controls-foreground: #fff\nsidebar-foreground-shadow: rgba(0,0,0, 0.5)\nsidebar-foreground: #fff\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #eee\nsidebar-tab-background-selected: rgba(255,255,255, 0.8)\nsidebar-tab-background: rgba(255,255,255, 0.4)\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: rgba(255,255,255, 0.2)\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #aaf\nsidebar-tiddler-link-foreground: #ddf\nsite-title-foreground: #fff\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ec6\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Rocker": {
            "title": "$:/palettes/Rocker",
            "name": "Rocker",
            "description": "A dark theme",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #000\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #cc0000\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ffffff\nsidebar-foreground-shadow: rgba(255,255,255, 0.0)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #000\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #ffbb99\nsidebar-tiddler-link-foreground: #cc0000\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ffbb99\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #cc0000\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/SolarFlare": {
            "title": "$:/palettes/SolarFlare",
            "name": "Solar Flare",
            "description": "Warm, relaxing earth colours",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": ": Background Tones\n\nbase03: #002b36\nbase02: #073642\n\n: Content Tones\n\nbase01: #586e75\nbase00: #657b83\nbase0: #839496\nbase1: #93a1a1\n\n: Background Tones\n\nbase2: #eee8d5\nbase3: #fdf6e3\n\n: Accent Colors\n\nyellow: #b58900\norange: #cb4b16\nred: #dc322f\nmagenta: #d33682\nviolet: #6c71c4\nblue: #268bd2\ncyan: #2aa198\ngreen: #859900\n\n: Additional Tones (RA)\n\nbase10: #c0c4bb\nviolet-muted: #7c81b0\nblue-muted: #4e7baa\n\nyellow-hot: #ffcc44\norange-hot: #eb6d20\nred-hot: #ff2222\nblue-hot: #2298ee\ngreen-hot: #98ee22\n\n: Palette\n\n: Do not use colour macro for background and foreground\nbackground: #fdf6e3\n    download-foreground: <<colour background>>\n    dragger-foreground: <<colour background>>\n    dropdown-background: <<colour background>>\n    modal-background: <<colour background>>\n    sidebar-foreground-shadow: <<colour background>>\n    tiddler-background: <<colour background>>\n    tiddler-border: <<colour background>>\n    tiddler-link-background: <<colour background>>\n    tab-background-selected: <<colour background>>\n        dropdown-tab-background-selected: <<colour tab-background-selected>>\nforeground: #657b83\n    dragger-background: <<colour foreground>>\n    tab-foreground: <<colour foreground>>\n        tab-foreground-selected: <<colour tab-foreground>>\n            sidebar-tab-foreground-selected: <<colour tab-foreground-selected>>\n        sidebar-tab-foreground: <<colour tab-foreground>>\n    sidebar-button-foreground: <<colour foreground>>\n    sidebar-controls-foreground: <<colour foreground>>\n    sidebar-foreground: <<colour foreground>>\n: base03\n: base02\n: base01\n    alert-muted-foreground: <<colour base01>>\n: base00\n    code-foreground: <<colour base00>>\n    message-foreground: <<colour base00>>\n    tag-foreground: <<colour base00>>\n: base0\n    sidebar-tiddler-link-foreground: <<colour base0>>\n: base1\n    muted-foreground: <<colour base1>>\n        blockquote-bar: <<colour muted-foreground>>\n        dropdown-border: <<colour muted-foreground>>\n        sidebar-muted-foreground: <<colour muted-foreground>>\n        tiddler-title-foreground: <<colour muted-foreground>>\n            site-title-foreground: <<colour tiddler-title-foreground>>\n: base2\n    modal-footer-background: <<colour base2>>\n    page-background: <<colour base2>>\n        modal-backdrop: <<colour page-background>>\n        notification-background: <<colour page-background>>\n        code-background: <<colour page-background>>\n            code-border: <<colour code-background>>\n        pre-background: <<colour page-background>>\n            pre-border: <<colour pre-background>>\n        sidebar-tab-background-selected: <<colour page-background>>\n    table-header-background: <<colour base2>>\n    tag-background: <<colour base2>>\n    tiddler-editor-background: <<colour base2>>\n    tiddler-info-background: <<colour base2>>\n    tiddler-info-tab-background: <<colour base2>>\n    tab-background: <<colour base2>>\n        dropdown-tab-background: <<colour tab-background>>\n: base3\n    alert-background: <<colour base3>>\n    message-background: <<colour base3>>\n: yellow\n: orange\n: red\n: magenta\n    alert-highlight: <<colour magenta>>\n: violet\n    external-link-foreground: <<colour violet>>\n: blue\n: cyan\n: green\n: base10\n    tiddler-controls-foreground: <<colour base10>>\n: violet-muted\n    external-link-foreground-visited: <<colour violet-muted>>\n: blue-muted\n    primary: <<colour blue-muted>>\n        download-background: 
<<colour primary>>\n        tiddler-link-foreground: <<colour primary>>\n\nalert-border: #b99e2f\ndirty-indicator: #ff0000\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nmessage-border: #cfd6e6\nmodal-border: #999999\nsidebar-controls-foreground-hover:\nsidebar-muted-foreground-hover:\nsidebar-tab-background: #ded8c5\nsidebar-tiddler-link-foreground-hover:\nstatic-alert-foreground: #aaaaaa\ntab-border: #cccccc\n    modal-footer-border: <<colour tab-border>>\n    modal-header-border: <<colour tab-border>>\n    notification-border: <<colour tab-border>>\n    sidebar-tab-border: <<colour tab-border>>\n    tab-border-selected: <<colour tab-border>>\n        sidebar-tab-border-selected: <<colour tab-border-selected>>\ntab-divider: #d8d8d8\n    sidebar-tab-divider: <<colour tab-divider>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-border: #dddddd\ntiddler-subtitle-foreground: #c0c0c0\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Vanilla": {
            "title": "$:/palettes/Vanilla",
            "name": "Vanilla",
            "description": "Pale and unobtrusive",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #bbb\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #f4f4f4\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #aaaaaa\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #f4f4f4\nsidebar-tab-background: #e0e0e0\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: #e4e4e4\nsidebar-tab-foreground-selected:\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #999999\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ec6\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/core/readme": {
            "title": "$:/core/readme",
            "text": "This plugin contains TiddlyWiki's core components, comprising:\n\n* JavaScript code modules\n* Icons\n* Templates needed to create TiddlyWiki's user interface\n* British English (''en-GB'') translations of the localisable strings used by the core\n"
        },
        "$:/core/templates/alltiddlers.template.html": {
            "title": "$:/core/templates/alltiddlers.template.html",
            "type": "text/vnd.tiddlywiki-html",
            "text": "<!-- This template is provided for backwards compatibility with older versions of TiddlyWiki -->\n\n<$set name=\"exportFilter\" value=\"[!is[system]sort[title]]\">\n\n{{$:/core/templates/exporters/StaticRiver}}\n\n</$set>\n"
        },
        "$:/core/templates/canonical-uri-external-image": {
            "title": "$:/core/templates/canonical-uri-external-image",
            "text": "<!--\n\nThis template is used to assign the ''_canonical_uri'' field to external images.\n\nChange the `./images/` part to a different base URI. The URI can be relative or absolute.\n\n-->\n./images/<$view field=\"title\" format=\"doubleurlencoded\"/>"
        },
        "$:/core/templates/canonical-uri-external-text": {
            "title": "$:/core/templates/canonical-uri-external-text",
            "text": "<!--\n\nThis template is used to assign the ''_canonical_uri'' field to external text files.\n\nChange the `./text/` part to a different base URI. The URI can be relative or absolute.\n\n-->\n./text/<$view field=\"title\" format=\"doubleurlencoded\"/>.tid"
        },
        "$:/core/templates/css-tiddler": {
            "title": "$:/core/templates/css-tiddler",
            "text": "<!--\n\nThis template is used for saving CSS tiddlers as a style tag with data attributes representing the tiddler fields.\n\n-->`<style`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/css\">`<$view field=\"text\" format=\"text\" />`</style>`"
        },
        "$:/core/templates/exporters/CsvFile": {
            "title": "$:/core/templates/exporters/CsvFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/CsvFile}}",
            "extension": ".csv",
            "text": "\\define renderContent()\n<$text text=<<csvtiddlers filter:\"\"\"$(exportFilter)$\"\"\" format:\"quoted-comma-sep\">>/>\n\\end\n<<renderContent>>\n"
        },
        "$:/core/templates/exporters/JsonFile": {
            "title": "$:/core/templates/exporters/JsonFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/JsonFile}}",
            "extension": ".json",
            "text": "\\define renderContent()\n<$text text=<<jsontiddlers filter:\"\"\"$(exportFilter)$\"\"\">>/>\n\\end\n<<renderContent>>\n"
        },
        "$:/core/templates/exporters/StaticRiver": {
            "title": "$:/core/templates/exporters/StaticRiver",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/StaticRiver}}",
            "extension": ".html",
            "text": "\\define tv-wikilink-template() #$uri_encoded$\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<style type=\"text/css\">\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n</style>\n</head>\n<body class=\"tc-body\">\n{{$:/StaticBanner||$:/core/templates/html-tiddler}}\n<section class=\"tc-story-river\">\n{{$:/core/templates/exporters/StaticRiver/Content||$:/core/templates/html-tiddler}}\n</section>\n</body>\n</html>\n"
        },
        "$:/core/templates/exporters/StaticRiver/Content": {
            "title": "$:/core/templates/exporters/StaticRiver/Content",
            "text": "\\define renderContent()\n{{{ $(exportFilter)$ ||$:/core/templates/static-tiddler}}}\n\\end\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n<<renderContent>>\n</$importvariables>\n"
        },
        "$:/core/templates/exporters/TidFile": {
            "title": "$:/core/templates/exporters/TidFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/TidFile}}",
            "extension": ".tid",
            "text": "\\define renderContent()\n{{{ $(exportFilter)$ +[limit[1]] ||$:/core/templates/tid-tiddler}}}\n\\end\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\"><<renderContent>></$importvariables>"
        },
        "$:/core/templates/html-div-tiddler": {
            "title": "$:/core/templates/html-div-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as an HTML DIV tag with attributes representing the tiddler fields.\n\n-->`<div`<$fields template=' $name$=\"$encoded_value$\"'></$fields>`>\n<pre>`<$view field=\"text\" format=\"htmlencoded\" />`</pre>\n</div>`\n"
        },
        "$:/core/templates/html-tiddler": {
            "title": "$:/core/templates/html-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as raw HTML\n\n--><$view field=\"text\" format=\"htmlwikified\" />"
        },
        "$:/core/templates/javascript-tiddler": {
            "title": "$:/core/templates/javascript-tiddler",
            "text": "<!--\n\nThis template is used for saving JavaScript tiddlers as a script tag with data attributes representing the tiddler fields.\n\n-->`<script`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/javascript\">`<$view field=\"text\" format=\"text\" />`</script>`"
        },
        "$:/core/templates/module-tiddler": {
            "title": "$:/core/templates/module-tiddler",
            "text": "<!--\n\nThis template is used for saving JavaScript tiddlers as a script tag with data attributes representing the tiddler fields. The body of the tiddler is wrapped in a call to the `$tw.modules.define` function in order to define the body of the tiddler as a module\n\n-->`<script`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/javascript\" data-module=\"yes\">$tw.modules.define(\"`<$view field=\"title\" format=\"jsencoded\" />`\",\"`<$view field=\"module-type\" format=\"jsencoded\" />`\",function(module,exports,require) {`<$view field=\"text\" format=\"text\" />`});\n</script>`"
        },
        "$:/core/templates/MOTW.html": {
            "title": "$:/core/templates/MOTW.html",
            "text": "\\rules only filteredtranscludeinline transcludeinline entity\n<!-- The following comment is called a MOTW comment and is necessary for the TiddlyIE Internet Explorer extension -->\n<!-- saved from url=(0021)http://tiddlywiki.com -->&#13;&#10;"
        },
        "$:/core/templates/plain-text-tiddler": {
            "title": "$:/core/templates/plain-text-tiddler",
            "text": "<$view field=\"text\" format=\"text\" />"
        },
        "$:/core/templates/raw-static-tiddler": {
            "title": "$:/core/templates/raw-static-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as static HTML\n\n--><$view field=\"text\" format=\"plainwikified\" />"
        },
        "$:/core/save/all": {
            "title": "$:/core/save/all",
            "text": "\\define saveTiddlerFilter()\n[is[tiddler]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]] $(publishFilter)$\n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/empty": {
            "title": "$:/core/save/empty",
            "text": "\\define saveTiddlerFilter()\n[is[system]] -[prefix[$:/state/popup/]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]]\n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/lazy-all": {
            "title": "$:/core/save/lazy-all",
            "text": "\\define saveTiddlerFilter()\n[is[system]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]] \n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/lazy-images": {
            "title": "$:/core/save/lazy-images",
            "text": "\\define saveTiddlerFilter()\n[is[tiddler]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] -[!is[system]is[image]] +[sort[title]] \n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/templates/single.tiddler.window": {
            "title": "$:/core/templates/single.tiddler.window",
            "text": "<$set name=\"themeTitle\" value={{$:/view}}>\n\n<$set name=\"tempCurrentTiddler\" value=<<currentTiddler>>>\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$set name=\"currentTiddler\" value=<<tempCurrentTiddler>>>\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$navigator story=\"$:/StoryList\" history=\"$:/HistoryList\">\n\n<$transclude mode=\"block\"/>\n\n</$navigator>\n\n</$importvariables>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n"
        },
        "$:/core/templates/split-recipe": {
            "title": "$:/core/templates/split-recipe",
            "text": "<$list filter=\"[!is[system]]\">\ntiddler: <$view field=\"title\" format=\"urlencoded\"/>.tid\n</$list>\n"
        },
        "$:/core/templates/static-tiddler": {
            "title": "$:/core/templates/static-tiddler",
            "text": "<a name=<<currentTiddler>>>\n<$transclude tiddler=\"$:/core/ui/ViewTemplate\"/>\n</a>"
        },
        "$:/core/templates/static.area": {
            "title": "$:/core/templates/static.area",
            "text": "<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n{{{ [all[shadows+tiddlers]tag[$:/tags/RawStaticContent]!has[draft.of]] ||$:/core/templates/raw-static-tiddler}}}\n{{$:/core/templates/static.content||$:/core/templates/html-tiddler}}\n</$reveal>\n<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\nThis file contains an encrypted ~TiddlyWiki. Enable ~JavaScript and enter the decryption password when prompted.\n</$reveal>\n"
        },
        "$:/core/templates/static.content": {
            "title": "$:/core/templates/static.content",
            "type": "text/vnd.tiddlywiki",
            "text": "<!-- For Google, and people without JavaScript-->\nThis [[TiddlyWiki|http://tiddlywiki.com]] contains the following tiddlers:\n\n<ul>\n<$list filter=<<saveTiddlerFilter>>>\n<li><$view field=\"title\" format=\"text\"></$view></li>\n</$list>\n</ul>\n"
        },
        "$:/core/templates/static.template.css": {
            "title": "$:/core/templates/static.template.css",
            "text": "{{$:/boot/boot.css||$:/core/templates/plain-text-tiddler}}\n\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n"
        },
        "$:/core/templates/static.template.html": {
            "title": "$:/core/templates/static.template.html",
            "type": "text/vnd.tiddlywiki-html",
            "text": "\\define tv-wikilink-template() static/$uri_doubleencoded$.html\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<style type=\"text/css\">\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n</style>\n</head>\n<body class=\"tc-body\">\n{{$:/StaticBanner||$:/core/templates/html-tiddler}}\n{{$:/core/ui/PageTemplate||$:/core/templates/html-tiddler}}\n</body>\n</html>\n"
        },
        "$:/core/templates/static.tiddler.html": {
            "title": "$:/core/templates/static.tiddler.html",
            "text": "\\define tv-wikilink-template() $uri_doubleencoded$.html\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n`<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"`{{$:/core/templates/version}}`\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<link rel=\"stylesheet\" href=\"static.css\">\n<title>`<$view field=\"caption\"><$view field=\"title\"/></$view>: {{$:/core/wiki/title}}`</title>\n</head>\n<body class=\"tc-body\">\n`{{$:/StaticBanner||$:/core/templates/html-tiddler}}`\n<section class=\"tc-story-river\">\n`<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n<$view tiddler=\"$:/core/ui/ViewTemplate\" format=\"htmlwikified\"/>\n</$importvariables>`\n</section>\n</body>\n</html>\n`"
        },
        "$:/core/templates/store.area.template.html": {
            "title": "$:/core/templates/store.area.template.html",
            "text": "<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n`<div id=\"storeArea\" style=\"display:none;\">`\n<$list filter=<<saveTiddlerFilter>> template=\"$:/core/templates/html-div-tiddler\"/>\n`</div>`\n</$reveal>\n<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\n`<!--~~ Encrypted tiddlers ~~-->`\n`<pre id=\"encryptedStoreArea\" type=\"text/plain\" style=\"display:none;\">`\n<$encrypt filter=<<saveTiddlerFilter>>/>\n`</pre>`\n</$reveal>"
        },
        "$:/core/templates/tid-tiddler": {
            "title": "$:/core/templates/tid-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers in TiddlyWeb *.tid format\n\n--><$fields exclude='text bag' template='$name$: $value$\n'></$fields>`\n`<$view field=\"text\" format=\"text\" />"
        },
        "$:/core/templates/tiddler-metadata": {
            "title": "$:/core/templates/tiddler-metadata",
            "text": "<!--\n\nThis template is used for saving tiddler metadata *.meta files\n\n--><$fields exclude='text bag' template='$name$: $value$\n'></$fields>"
        },
        "$:/core/templates/tiddlywiki5.html": {
            "title": "$:/core/templates/tiddlywiki5.html",
            "text": "\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n{{$:/core/templates/MOTW.html}}<html>\n<head>\n<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\t\t<!-- Force IE standards mode for Intranet and HTA - should be the first meta -->\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"application-name\" content=\"TiddlyWiki\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\" />\n<meta name=\"copyright\" content=\"{{$:/core/copyright.txt}}\" />\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<!--~~ This is a Tiddlywiki file. The points of interest in the file are marked with this pattern ~~-->\n\n<!--~~ Raw markup ~~-->\n{{{ [all[shadows+tiddlers]tag[$:/core/wiki/rawmarkup]] [all[shadows+tiddlers]tag[$:/tags/RawMarkup]] ||$:/core/templates/plain-text-tiddler}}}\n</head>\n<body class=\"tc-body\">\n<!--~~ Static styles ~~-->\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<!--~~ Static content for Google and browsers without JavaScript ~~-->\n<noscript>\n<div id=\"splashArea\">\n{{$:/core/templates/static.area}}\n</div>\n</noscript>\n<!--~~ Ordinary tiddlers ~~-->\n{{$:/core/templates/store.area.template.html}}\n<!--~~ Library modules ~~-->\n<div id=\"libraryModules\" style=\"display:none;\">\n{{{ [is[system]type[application/javascript]library[yes]] ||$:/core/templates/javascript-tiddler}}}\n</div>\n<!--~~ Boot kernel prologue ~~-->\n<div id=\"bootKernelPrefix\" style=\"display:none;\">\n{{ $:/boot/bootprefix.js ||$:/core/templates/javascript-tiddler}}\n</div>\n<!--~~ Boot kernel ~~-->\n<div id=\"bootKernel\" style=\"display:none;\">\n{{ $:/boot/boot.js ||$:/core/templates/javascript-tiddler}}\n</div>\n</body>\n</html>\n"
        },
        "$:/core/templates/version": {
            "title": "$:/core/templates/version",
            "text": "<<version>>"
        },
        "$:/core/templates/wikified-tiddler": {
            "title": "$:/core/templates/wikified-tiddler",
            "text": "<$transclude />"
        },
        "$:/core/ui/AboveStory/tw2-plugin-check": {
            "title": "$:/core/ui/AboveStory/tw2-plugin-check",
            "tags": "$:/tags/AboveStory",
            "text": "\\define lingo-base() $:/language/AboveStory/ClassicPlugin/\n<$list filter=\"[all[system+tiddlers]tag[systemConfig]limit[1]]\">\n\n<div class=\"tc-message-box\">\n\n<<lingo Warning>>\n\n<ul>\n\n<$list filter=\"[all[system+tiddlers]tag[systemConfig]limit[1]]\">\n\n<li>\n\n<$link><$view field=\"title\"/></$link>\n\n</li>\n\n</$list>\n\n</ul>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter": {
            "title": "$:/core/ui/AdvancedSearch/Filter",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Filter/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<<lingo Filter/Hint>>\n\n<div class=\"tc-search tc-advanced-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/AdvancedSearch/FilterButton]!has[draft.of]]\"><$transclude/></$list>\n</div>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter={{$:/temp/advancedsearch}}/>\"\"\">\n<div class=\"tc-search-results\">\n<<lingo Filter/Matches>>\n<$list filter={{$:/temp/advancedsearch}} template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$set>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/clear": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/clear",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/delete": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/delete",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button popup=<<qualify \"$:/state/filterDeleteDropdown\">> class=\"tc-btn-invisible\">\n{{$:/core/images/delete-button}}\n</$button>\n</$reveal>\n\n<$reveal state=<<qualify \"$:/state/filterDeleteDropdown\">> type=\"popup\" position=\"belowleft\" animate=\"yes\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<div class=\"tc-dropdown-item-plain\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter={{$:/temp/advancedsearch}}/>\"\"\">\nAre you sure you wish to delete <<resultCount>> tiddler(s)?\n</$set>\n</div>\n<div class=\"tc-dropdown-item-plain\">\n<$button class=\"tc-btn\">\n<$action-deletetiddler $filter={{$:/temp/advancedsearch}}/>\nDelete these tiddlers\n</$button>\n</div>\n</div>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/filterDropdown\">> class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</span>\n\n<$reveal state=<<qualify \"$:/state/filterDropdown\">> type=\"popup\" position=\"belowleft\" animate=\"yes\">\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Filter]]\"><$link to={{!!filter}}><$transclude field=\"description\"/></$link>\n</$list>\n</div>\n</div>\n</$linkcatcher>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/export": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/export",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$macrocall $name=\"exportButton\" exportFilter={{$:/temp/advancedsearch}} lingoBase=\"$:/language/Buttons/ExportTiddlers/\"/>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Shadows": {
            "title": "$:/core/ui/AdvancedSearch/Shadows",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Shadows/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Shadows/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[all[shadows]search{$:/temp/advancedsearch}] -[[$:/temp/advancedsearch]]\"/>\"\"\">\n\n<div class=\"tc-search-results\">\n\n<<lingo Shadows/Matches>>\n\n<$list filter=\"[all[shadows]search{$:/temp/advancedsearch}sort[title]limit[250]] -[[$:/temp/advancedsearch]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n</div>\n\n</$set>\n\n</$reveal>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"match\" text=\"\">\n\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Standard": {
            "title": "$:/core/ui/AdvancedSearch/Standard",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Standard/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Standard/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$set name=\"searchTiddler\" value=\"$:/temp/advancedsearch\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]butfirst[]limit[1]]\" emptyMessage=\"\"\"\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\">\n<$transclude/>\n</$list>\n\"\"\">\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\" default={{$:/config/SearchResults/Default}}/>\n</$list>\n</$set>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/System": {
            "title": "$:/core/ui/AdvancedSearch/System",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/System/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo System/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[is[system]search{$:/temp/advancedsearch}] -[[$:/temp/advancedsearch]]\"/>\"\"\">\n\n<div class=\"tc-search-results\">\n\n<<lingo System/Matches>>\n\n<$list filter=\"[is[system]search{$:/temp/advancedsearch}sort[title]limit[250]] -[[$:/temp/advancedsearch]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n</div>\n\n</$set>\n\n</$reveal>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"match\" text=\"\">\n\n</$reveal>\n"
        },
        "$:/AdvancedSearch": {
            "title": "$:/AdvancedSearch",
            "icon": "$:/core/images/advanced-search-button",
            "color": "#bbb",
            "text": "<div class=\"tc-advanced-search\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/AdvancedSearch]!has[draft.of]]\" \"$:/core/ui/AdvancedSearch/System\">>\n</div>\n"
        },
        "$:/core/ui/AlertTemplate": {
            "title": "$:/core/ui/AlertTemplate",
            "text": "<div class=\"tc-alert\">\n<div class=\"tc-alert-toolbar\">\n<$button class=\"tc-btn-invisible\"><$action-deletetiddler $tiddler=<<currentTiddler>>/>{{$:/core/images/delete-button}}</$button>\n</div>\n<div class=\"tc-alert-subtitle\">\n<$view field=\"component\"/> - <$view field=\"modified\" format=\"date\" template=\"0hh:0mm:0ss DD MM YYYY\"/> <$reveal type=\"nomatch\" state=\"!!count\" text=\"\"><span class=\"tc-alert-highlight\">({{$:/language/Count}}: <$view field=\"count\"/>)</span></$reveal>\n</div>\n<div class=\"tc-alert-body\">\n\n<$transclude/>\n\n</div>\n</div>\n"
        },
        "$:/core/ui/BinaryWarning": {
            "title": "$:/core/ui/BinaryWarning",
            "text": "\\define lingo-base() $:/language/BinaryWarning/\n<div class=\"tc-binary-warning\">\n\n<<lingo Prompt>>\n\n</div>\n"
        },
        "$:/core/ui/Components/tag-link": {
            "title": "$:/core/ui/Components/tag-link",
            "text": "<$link>\n<$set name=\"backgroundColor\" value={{!!color}}>\n<span style=<<tag-styles>> class=\"tc-tag-label\">\n<$view field=\"title\" format=\"text\"/>\n</span>\n</$set>\n</$link>"
        },
        "$:/core/ui/ControlPanel/Advanced": {
            "title": "$:/core/ui/ControlPanel/Advanced",
            "tags": "$:/tags/ControlPanel/Info",
            "caption": "{{$:/language/ControlPanel/Advanced/Caption}}",
            "text": "{{$:/language/ControlPanel/Advanced/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Advanced]!has[draft.of]]\" \"$:/core/ui/ControlPanel/TiddlerFields\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/Appearance": {
            "title": "$:/core/ui/ControlPanel/Appearance",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Appearance/Caption}}",
            "text": "{{$:/language/ControlPanel/Appearance/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Appearance]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Theme\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/Basics": {
            "title": "$:/core/ui/ControlPanel/Basics",
            "tags": "$:/tags/ControlPanel/Info",
            "caption": "{{$:/language/ControlPanel/Basics/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Basics/\n\n\\define show-filter-count(filter)\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $value=\"\"\"$filter$\"\"\"/>\n<$action-setfield $tiddler=\"$:/state/tab--1498284803\" $value=\"$:/core/ui/AdvancedSearch/Filter\"/>\n<$action-navigate $to=\"$:/AdvancedSearch\"/>\n''<$count filter=\"\"\"$filter$\"\"\"/>''\n{{$:/core/images/advanced-search-button}}\n</$button>\n\\end\n\n|<<lingo Version/Prompt>> |''<<version>>'' |\n|<$link to=\"$:/SiteTitle\"><<lingo Title/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteTitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/SiteSubtitle\"><<lingo Subtitle/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteSubtitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/status/UserName\"><<lingo Username/Prompt>></$link> |<$edit-text tiddler=\"$:/status/UserName\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/config/AnimationDuration\"><<lingo AnimDuration/Prompt>></$link> |<$edit-text tiddler=\"$:/config/AnimationDuration\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/DefaultTiddlers\"><<lingo DefaultTiddlers/Prompt>></$link> |<<lingo DefaultTiddlers/TopHint>><br> <$edit tag=\"textarea\" tiddler=\"$:/DefaultTiddlers\" class=\"tc-edit-texteditor\"/><br>//<<lingo DefaultTiddlers/BottomHint>>// |\n|<$link to=\"$:/config/NewJournal/Title\"><<lingo NewJournal/Title/Prompt>></$link> |<$edit-text tiddler=\"$:/config/NewJournal/Title\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/config/NewJournal/Tags\"><<lingo NewJournal/Tags/Prompt>></$link> |<$edit-text tiddler=\"$:/config/NewJournal/Tags\" default=\"\" tag=\"input\"/> |\n|<<lingo Language/Prompt>> |{{$:/snippets/minilanguageswitcher}} |\n|<<lingo Tiddlers/Prompt>> |<<show-filter-count \"[!is[system]sort[title]]\">> |\n|<<lingo Tags/Prompt>> |<<show-filter-count \"[tags[]sort[title]]\">> |\n|<<lingo SystemTiddlers/Prompt>> |<<show-filter-count \"[is[system]sort[title]]\">> |\n|<<lingo ShadowTiddlers/Prompt>> |<<show-filter-count \"[all[shadows]sort[title]]\">> |\n|<<lingo OverriddenShadowTiddlers/Prompt>> |<<show-filter-count \"[is[tiddler]is[shadow]sort[title]]\">> |\n"
        },
        "$:/core/ui/ControlPanel/EditorTypes": {
            "title": "$:/core/ui/ControlPanel/EditorTypes",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/EditorTypes/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/EditorTypes/\n\n<<lingo Hint>>\n\n<table>\n<tbody>\n<tr>\n<th><<lingo Type/Caption>></th>\n<th><<lingo Editor/Caption>></th>\n</tr>\n<$list filter=\"[all[shadows+tiddlers]prefix[$:/config/EditorTypeMappings/]sort[title]]\">\n<tr>\n<td>\n<$link>\n<$list filter=\"[all[current]removeprefix[$:/config/EditorTypeMappings/]]\">\n<$text text={{!!title}}/>\n</$list>\n</$link>\n</td>\n<td>\n<$view field=\"text\"/>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ControlPanel/Info": {
            "title": "$:/core/ui/ControlPanel/Info",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Info/Caption}}",
            "text": "{{$:/language/ControlPanel/Info/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Info]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Basics\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/KeyboardShortcuts": {
            "title": "$:/core/ui/ControlPanel/KeyboardShortcuts",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/KeyboardShortcuts/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/KeyboardShortcuts/\n\n\\define new-shortcut(title)\n<div class=\"tc-dropdown-item-plain\">\n<$edit-shortcut tiddler=\"$title$\" placeholder={{$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt}} style=\"width:auto;\"/> <$button>\n<<lingo Add/Caption>>\n<$action-listops\n\t$tiddler=\"$(shortcutTitle)$\"\n\t$field=\"text\"\n\t$subfilter=\"[{$title$}]\"\n/>\n<$action-deletetiddler\n\t$tiddler=\"$title$\"\n/>\n</$button>\n</div>\n\\end\n\n\\define shortcut-list-item(caption)\n<td>\n</td>\n<td style=\"text-align:right;font-size:0.7em;\">\n<<lingo Platform/$caption$>>\n</td>\n<td>\n<div style=\"position:relative;\">\n<$button popup=<<qualify \"$:/state/dropdown/$(shortcutTitle)$\">> class=\"tc-btn-invisible\">\n{{$:/core/images/edit-button}}\n</$button>\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts={{$(shortcutTitle)$}} prefix=\"<kbd>\" separator=\"</kbd> <kbd>\" suffix=\"</kbd>\"/>\n\n<$reveal state=<<qualify \"$:/state/dropdown/$(shortcutTitle)$\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown tc-popup-keep\">\n<$list filter=\"[list[$(shortcutTitle)$!!text]sort[title]]\" variable=\"shortcut\" emptyMessage=\"\"\"\n<div class=\"tc-dropdown-item-plain\">\n//<<lingo NoShortcuts/Caption>>//\n</div>\n\"\"\">\n<div class=\"tc-dropdown-item-plain\">\n<$button class=\"tc-btn-invisible\" tooltip=<<lingo Remove/Hint>>>\n<$action-listops\n\t$tiddler=\"$(shortcutTitle)$\"\n\t$field=\"text\"\n\t$subfilter=\"+[remove<shortcut>]\"\n/>\n&times;\n</$button>\n<kbd>\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts=<<shortcut>>/>\n</kbd>\n</div>\n</$list>\n<hr/>\n<$macrocall $name=\"new-shortcut\" title=<<qualify \"$:/state/new-shortcut/$(shortcutTitle)$\">>/>\n</div>\n</div>\n</$reveal>\n</div>\n</td>\n\\end\n\n\\define shortcut-list(caption,prefix)\n<tr>\n<$list filter=\"[all[tiddlers+shadows][$prefix$$(shortcutName)$]]\" variable=\"shortcutTitle\">\n<<shortcut-list-item \"$caption$\">>\n</$list>\n</tr>\n\\end\n\n\\define shortcut-editor()\n<<shortcut-list \"All\" \"$:/config/shortcuts/\">>\n<<shortcut-list \"Mac\" \"$:/config/shortcuts-mac/\">>\n<<shortcut-list \"NonMac\" \"$:/config/shortcuts-not-mac/\">>\n<<shortcut-list \"Linux\" \"$:/config/shortcuts-linux/\">>\n<<shortcut-list \"NonLinux\" \"$:/config/shortcuts-not-linux/\">>\n<<shortcut-list \"Windows\" \"$:/config/shortcuts-windows/\">>\n<<shortcut-list \"NonWindows\" \"$:/config/shortcuts-not-windows/\">>\n\\end\n\n\\define shortcut-preview()\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts={{$(shortcutPrefix)$$(shortcutName)$}} prefix=\"<kbd>\" separator=\"</kbd> <kbd>\" suffix=\"</kbd>\"/>\n\\end\n\n\\define shortcut-item-inner()\n<tr>\n<td>\n<$reveal type=\"nomatch\" state=<<dropdownStateTitle>> text=\"open\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield\n\t$tiddler=<<dropdownStateTitle>>\n\t$value=\"open\"\n/>\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<dropdownStateTitle>> text=\"open\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield\n\t$tiddler=<<dropdownStateTitle>>\n\t$value=\"close\"\n/>\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n''<$text text=<<shortcutName>>/>''\n</td>\n<td>\n<$transclude tiddler=\"$:/config/ShortcutInfo/$(shortcutName)$\"/>\n</td>\n<td>\n<$list filter=\"$:/config/shortcuts/ $:/config/shortcuts-mac/ 
$:/config/shortcuts-not-mac/ $:/config/shortcuts-linux/ $:/config/shortcuts-not-linux/ $:/config/shortcuts-windows/ $:/config/shortcuts-not-windows/\" variable=\"shortcutPrefix\">\n<<shortcut-preview>>\n</$list>\n</td>\n</tr>\n<$set name=\"dropdownState\" value={{$(dropdownStateTitle)$}}>\n<$list filter=\"[<dropdownState>prefix[open]]\" variable=\"listItem\">\n<<shortcut-editor>>\n</$list>\n</$set>\n\\end\n\n\\define shortcut-item()\n<$set name=\"dropdownStateTitle\" value=<<qualify \"$:/state/dropdown/keyboardshortcut/$(shortcutName)$\">>>\n<<shortcut-item-inner>>\n</$set>\n\\end\n\n<table>\n<tbody>\n<$list filter=\"[all[shadows+tiddlers]removeprefix[$:/config/ShortcutInfo/]]\" variable=\"shortcutName\">\n<<shortcut-item>>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ControlPanel/LoadedModules": {
            "title": "$:/core/ui/ControlPanel/LoadedModules",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/LoadedModules/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n<<lingo LoadedModules/Hint>>\n\n{{$:/snippets/modules}}\n"
        },
        "$:/core/ui/ControlPanel/Modals/AddPlugins": {
            "title": "$:/core/ui/ControlPanel/Modals/AddPlugins",
            "subtitle": "{{$:/core/images/download-button}} {{$:/language/ControlPanel/Plugins/Add/Caption}}",
            "text": "\\define install-plugin-button()\n<$button>\n<$action-sendmessage $message=\"tm-load-plugin-from-library\" url={{!!url}} title={{$(assetInfo)$!!original-title}}/>\n<$list filter=\"[<assetInfo>get[original-title]get[version]]\" variable=\"installedVersion\" emptyMessage=\"\"\"{{$:/language/ControlPanel/Plugins/Install/Caption}}\"\"\">\n{{$:/language/ControlPanel/Plugins/Reinstall/Caption}}\n</$list>\n</$button>\n\\end\n\n\\define popup-state-macro()\n$:/state/add-plugin-info/$(connectionTiddler)$/$(assetInfo)$\n\\end\n\n\\define display-plugin-info(type)\n<$set name=\"popup-state\" value=<<popup-state-macro>>>\n<div class=\"tc-plugin-info\">\n<div class=\"tc-plugin-info-chunk tc-small-icon\">\n<$reveal type=\"nomatch\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"no\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<$list filter=\"[<assetInfo>has[icon]]\" emptyMessage=\"\"\"<$transclude tiddler=\"$:/core/images/plugin-generic-$type$\"/>\"\"\">\n<img src={{$(assetInfo)$!!icon}}/>\n</$list>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<h1><$view tiddler=<<assetInfo>> field=\"description\"/></h1>\n<h2><$view tiddler=<<assetInfo>> field=\"original-title\"/></h2>\n<div><em><$view tiddler=<<assetInfo>> field=\"version\"/></em></div>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<<install-plugin-button>>\n</div>\n</div>\n<$reveal type=\"match\" text=\"yes\" state=<<popup-state>>>\n<div class=\"tc-plugin-info-dropdown\">\n<div class=\"tc-plugin-info-dropdown-message\">\n<$list filter=\"[<assetInfo>get[original-title]get[version]]\" variable=\"installedVersion\" emptyMessage=\"\"\"{{$:/language/ControlPanel/Plugins/NotInstalled/Hint}}\"\"\">\n<em>\n{{$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint}}\n</em>\n</$list>\n</div>\n<div class=\"tc-plugin-info-dropdown-body\">\n<$transclude tiddler=<<assetInfo>> field=\"readme\" mode=\"block\"/>\n</div>\n</div>\n</$reveal>\n</$set>\n\\end\n\n\\define load-plugin-library-button()\n<$button class=\"tc-btn-big-green\">\n<$action-sendmessage $message=\"tm-load-plugin-library\" url={{!!url}} infoTitlePrefix=\"$:/temp/RemoteAssetInfo/\"/>\n{{$:/core/images/chevron-right}} {{$:/language/ControlPanel/Plugins/OpenPluginLibrary}}\n</$button>\n\\end\n\n\\define display-server-assets(type)\n{{$:/language/Search/Search}}: <$edit-text tiddler=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" default=\"\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n<div class=\"tc-plugin-library-listing\">\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[$type$]search{$:/temp/RemoteAssetSearch/$(currentTiddler)$}sort[description]]\" variable=\"assetInfo\">\n<<display-plugin-info \"$type$\">>\n</$list>\n</div>\n\\end\n\n\\define display-server-connection()\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/ServerConnection]suffix{!!url}]\" variable=\"connectionTiddler\" 
emptyMessage=<<load-plugin-library-button>>>\n\n<<tabs \"[[$:/core/ui/ControlPanel/Plugins/Add/Plugins]] [[$:/core/ui/ControlPanel/Plugins/Add/Themes]] [[$:/core/ui/ControlPanel/Plugins/Add/Languages]]\" \"$:/core/ui/ControlPanel/Plugins/Add/Plugins\">>\n\n</$list>\n\\end\n\n\\define plugin-library-listing()\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/PluginLibrary]]\">\n<div class=\"tc-plugin-library\">\n\n!! <$link><$transclude field=\"caption\"><$view field=\"title\"/></$transclude></$link>\n\n//<$view field=\"url\"/>//\n\n<$transclude/>\n\n<<display-server-connection>>\n</div>\n</$list>\n\\end\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<div>\n<<plugin-library-listing>>\n</div>\n\n</$importvariables>\n"
        },
        "$:/core/ui/ControlPanel/Palette": {
            "title": "$:/core/ui/ControlPanel/Palette",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Palette/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/\n\n{{$:/snippets/paletteswitcher}}\n\n<$reveal type=\"nomatch\" state=\"$:/state/ShowPaletteEditor\" text=\"yes\">\n\n<$button set=\"$:/state/ShowPaletteEditor\" setTo=\"yes\"><<lingo ShowEditor/Caption>></$button>\n\n</$reveal>\n\n<$reveal type=\"match\" state=\"$:/state/ShowPaletteEditor\" text=\"yes\">\n\n<$button set=\"$:/state/ShowPaletteEditor\" setTo=\"no\"><<lingo HideEditor/Caption>></$button>\n{{$:/snippets/paletteeditor}}\n\n</$reveal>\n\n"
        },
        "$:/core/ui/ControlPanel/Parsing": {
            "title": "$:/core/ui/ControlPanel/Parsing",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/Parsing/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Parsing/\n\n\\define parsing-inner(typeCap)\n<li>\n<$checkbox tiddler=\"\"\"$:/config/WikiParserRules/$typeCap$/$(currentTiddler)$\"\"\" field=\"text\" checked=\"enable\" unchecked=\"disable\" default=\"enable\"> ''<$text text=<<currentTiddler>>/>'': </$checkbox>\n</li>\n\\end\n\n\\define parsing-outer(typeLower,typeCap)\n<ul>\n<$list filter=\"[wikiparserrules[$typeLower$]]\">\n<<parsing-inner typeCap:\"$typeCap$\">>\n</$list>\n</ul>\n\\end\n\n<<lingo Hint>>\n\n! <<lingo Pragma/Caption>>\n\n<<parsing-outer typeLower:\"pragma\" typeCap:\"Pragma\">>\n\n! <<lingo Inline/Caption>>\n\n<<parsing-outer typeLower:\"inline\" typeCap:\"Inline\">>\n\n! <<lingo Block/Caption>>\n\n<<parsing-outer typeLower:\"block\" typeCap:\"Block\">>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Languages": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Languages",
            "caption": "{{$:/language/ControlPanel/Plugins/Languages/Caption}} (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[language]]\"/>)",
            "text": "<<display-server-assets language>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Plugins",
            "caption": "{{$:/language/ControlPanel/Plugins/Plugins/Caption}}  (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[plugin]]\"/>)",
            "text": "<<display-server-assets plugin>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Themes": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Themes",
            "caption": "{{$:/language/ControlPanel/Plugins/Themes/Caption}}  (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[theme]]\"/>)",
            "text": "<<display-server-assets theme>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/AddPlugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/AddPlugins",
            "text": "\\define lingo-base() $:/language/ControlPanel/Plugins/\n\n<$button message=\"tm-modal\" param=\"$:/core/ui/ControlPanel/Modals/AddPlugins\" tooltip={{$:/language/ControlPanel/Plugins/Add/Hint}} class=\"tc-btn-big-green\" style=\"background:blue;\">\n{{$:/core/images/download-button}} <<lingo Add/Caption>>\n</$button>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Languages": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Languages",
            "caption": "{{$:/language/ControlPanel/Plugins/Languages/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[language]]\"/>)",
            "text": "<<plugin-table language>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Plugins",
            "caption": "{{$:/language/ControlPanel/Plugins/Plugins/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[plugin]]\"/>)",
            "text": "<<plugin-table plugin>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Themes": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Themes",
            "caption": "{{$:/language/ControlPanel/Plugins/Themes/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[theme]]\"/>)",
            "text": "<<plugin-table theme>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Plugins/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Plugins/\n\n\\define popup-state-macro()\n$(qualified-state)$-$(currentTiddler)$\n\\end\n\n\\define tabs-state-macro()\n$(popup-state)$-$(pluginInfoType)$\n\\end\n\n\\define plugin-icon-title()\n$(currentTiddler)$/icon\n\\end\n\n\\define plugin-disable-title()\n$:/config/Plugins/Disabled/$(currentTiddler)$\n\\end\n\n\\define plugin-table-body(type,disabledMessage)\n<div class=\"tc-plugin-info-chunk tc-small-icon\">\n<$reveal type=\"nomatch\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"no\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<plugin-icon-title>>>\n<$transclude tiddler=\"$:/core/images/plugin-generic-$type$\"/>\n</$transclude>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<h1>\n''<$view field=\"description\"><$view field=\"title\"/></$view>'' $disabledMessage$\n</h1>\n<h2>\n<$view field=\"title\"/>\n</h2>\n<h2>\n<div><em><$view field=\"version\"/></em></div>\n</h2>\n</div>\n\\end\n\n\\define plugin-table(type)\n<$set name=\"qualified-state\" value=<<qualify \"$:/state/plugin-info\">>>\n<$list filter=\"[!has[draft.of]plugin-type[$type$]sort[description]]\" emptyMessage=<<lingo \"Empty/Hint\">>>\n<$set name=\"popup-state\" value=<<popup-state-macro>>>\n<$reveal type=\"nomatch\" state=<<plugin-disable-title>> text=\"yes\">\n<$link to={{!!title}} class=\"tc-plugin-info\">\n<<plugin-table-body type:\"$type$\">>\n</$link>\n</$reveal>\n<$reveal type=\"match\" state=<<plugin-disable-title>> text=\"yes\">\n<$link to={{!!title}} class=\"tc-plugin-info tc-plugin-info-disabled\">\n<<plugin-table-body type:\"$type$\" disabledMessage:\"<$macrocall $name='lingo' title='Disabled/Status'/>\">>\n</$link>\n</$reveal>\n<$reveal type=\"match\" text=\"yes\" state=<<popup-state>>>\n<div class=\"tc-plugin-info-dropdown\">\n<div class=\"tc-plugin-info-dropdown-body\">\n<$list filter=\"[all[current]] -[[$:/core]]\">\n<div style=\"float:right;\">\n<$reveal type=\"nomatch\" state=<<plugin-disable-title>> text=\"yes\">\n<$button set=<<plugin-disable-title>> setTo=\"yes\" tooltip={{$:/language/ControlPanel/Plugins/Disable/Hint}} aria-label={{$:/language/ControlPanel/Plugins/Disable/Caption}}>\n<<lingo Disable/Caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<plugin-disable-title>> text=\"yes\">\n<$button set=<<plugin-disable-title>> setTo=\"no\" tooltip={{$:/language/ControlPanel/Plugins/Enable/Hint}} aria-label={{$:/language/ControlPanel/Plugins/Enable/Caption}}>\n<<lingo Enable/Caption>>\n</$button>\n</$reveal>\n</div>\n</$list>\n<$reveal type=\"nomatch\" text=\"\" state=\"!!list\">\n<$macrocall $name=\"tabs\" state=<<tabs-state-macro>> tabsList={{!!list}} default=\"readme\" template=\"$:/core/ui/PluginInfo\"/>\n</$reveal>\n<$reveal type=\"match\" text=\"\" state=\"!!list\">\n<<lingo NoInformation/Hint>>\n</$reveal>\n</div>\n</div>\n</$reveal>\n</$set>\n</$list>\n</$set>\n\\end\n\n{{$:/core/ui/ControlPanel/Plugins/AddPlugins}}\n\n<<lingo Installed/Hint>>\n\n<<tabs \"[[$:/core/ui/ControlPanel/Plugins/Installed/Plugins]] [[$:/core/ui/ControlPanel/Plugins/Installed/Themes]] [[$:/core/ui/ControlPanel/Plugins/Installed/Languages]]\" 
\"$:/core/ui/ControlPanel/Plugins/Installed/Plugins\">>\n"
        },
        "$:/core/ui/ControlPanel/Saving": {
            "title": "$:/core/ui/ControlPanel/Saving",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Saving/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Saving/\n\\define backupURL()\nhttp://$(userName)$.tiddlyspot.com/backup/\n\\end\n\\define backupLink()\n<$reveal type=\"nomatch\" state=\"$:/UploadName\" text=\"\">\n<$set name=\"userName\" value={{$:/UploadName}}>\n<$reveal type=\"match\" state=\"$:/UploadURL\" text=\"\">\n<<backupURL>>\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/UploadURL\" text=\"\">\n<$macrocall $name=resolvePath source={{$:/UploadBackupDir}} root={{$:/UploadURL}}>>\n</$reveal>\n</$set>\n</$reveal>\n\\end\n! <<lingo TiddlySpot/Heading>>\n\n<<lingo TiddlySpot/Description>>\n\n|<<lingo TiddlySpot/UserName>> |<$edit-text tiddler=\"$:/UploadName\" default=\"\" tag=\"input\"/> |\n|<<lingo TiddlySpot/Password>> |<$password name=\"upload\"/> |\n|<<lingo TiddlySpot/Backups>> |<<backupLink>> |\n\n''<<lingo TiddlySpot/Advanced/Heading>>''\n\n|<<lingo TiddlySpot/ServerURL>>  |<$edit-text tiddler=\"$:/UploadURL\" default=\"\" tag=\"input\"/> |\n|<<lingo TiddlySpot/Filename>> |<$edit-text tiddler=\"$:/UploadFilename\" default=\"index.html\" tag=\"input\"/> |\n|<<lingo TiddlySpot/UploadDir>> |<$edit-text tiddler=\"$:/UploadDir\" default=\".\" tag=\"input\"/> |\n|<<lingo TiddlySpot/BackupDir>> |<$edit-text tiddler=\"$:/UploadBackupDir\" default=\".\" tag=\"input\"/> |\n\n<<lingo TiddlySpot/Hint>>"
        },
        "$:/core/ui/ControlPanel/Settings/AutoSave": {
            "title": "$:/core/ui/ControlPanel/Settings/AutoSave",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/AutoSave/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/AutoSave/\n\n<$link to=\"$:/config/AutoSave\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/AutoSave\" value=\"yes\"> <<lingo Enabled/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/AutoSave\" value=\"no\"> <<lingo Disabled/Description>> </$radio>\n"
        },
        "$:/core/buttonstyles/Borderless": {
            "title": "$:/core/buttonstyles/Borderless",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless}}",
            "text": "tc-btn-invisible"
        },
        "$:/core/buttonstyles/Boxed": {
            "title": "$:/core/buttonstyles/Boxed",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed}}",
            "text": "tc-btn-boxed"
        },
        "$:/core/buttonstyles/Rounded": {
            "title": "$:/core/buttonstyles/Rounded",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded}}",
            "text": "tc-btn-rounded"
        },
        "$:/core/ui/ControlPanel/Settings/CamelCase": {
            "title": "$:/core/ui/ControlPanel/Settings/CamelCase",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/CamelCase/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/CamelCase/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/WikiParserRules/Inline/wikilink\" field=\"text\" checked=\"enable\" unchecked=\"disable\" default=\"enable\"> <$link to=\"$:/config/WikiParserRules/Inline/wikilink\"><<lingo Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/DefaultSidebarTab": {
            "caption": "{{$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption}}",
            "tags": "$:/tags/ControlPanel/Settings",
            "title": "$:/core/ui/ControlPanel/Settings/DefaultSidebarTab",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/DefaultSidebarTab/\n\n<$link to=\"$:/config/DefaultSidebarTab\"><<lingo Hint>></$link>\n\n<$select tiddler=\"$:/config/DefaultSidebarTab\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SideBar]!has[draft.of]]\">\n<option value=<<currentTiddler>>><$transclude field=\"caption\"><$text text=<<currentTiddler>>/></$transclude></option>\n</$list>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings/EditorToolbar": {
            "title": "$:/core/ui/ControlPanel/Settings/EditorToolbar",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/EditorToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/EditorToolbar/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/TextEditor/EnableToolbar\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/TextEditor/EnableToolbar\"><<lingo Description>></$link> </$checkbox>\n\n"
        },
        "$:/core/ui/ControlPanel/Settings/LinkToBehaviour": {
            "title": "$:/core/ui/ControlPanel/Settings/LinkToBehaviour",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/LinkToBehaviour/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/LinkToBehaviour/\n\n<$link to=\"$:/config/Navigation/openLinkFromInsideRiver\"><<lingo \"InsideRiver/Hint\">></$link>\n\n<$select tiddler=\"$:/config/Navigation/openLinkFromInsideRiver\">\n  <option value=\"above\"><<lingo \"OpenAbove\">></option>\n  <option value=\"below\"><<lingo \"OpenBelow\">></option>\n  <option value=\"top\"><<lingo \"OpenAtTop\">></option>\n  <option value=\"bottom\"><<lingo \"OpenAtBottom\">></option>\n</$select>\n\n<$link to=\"$:/config/Navigation/openLinkFromOutsideRiver\"><<lingo \"OutsideRiver/Hint\">></$link>\n\n<$select tiddler=\"$:/config/Navigation/openLinkFromOutsideRiver\">\n  <option value=\"top\"><<lingo \"OpenAtTop\">></option>\n  <option value=\"bottom\"><<lingo \"OpenAtBottom\">></option>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings/MissingLinks": {
            "title": "$:/core/ui/ControlPanel/Settings/MissingLinks",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/MissingLinks/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/MissingLinks/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/MissingLinks\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/MissingLinks\"><<lingo Description>></$link> </$checkbox>\n\n"
        },
        "$:/core/ui/ControlPanel/Settings/NavigationAddressBar": {
            "title": "$:/core/ui/ControlPanel/Settings/NavigationAddressBar",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/NavigationAddressBar/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/NavigationAddressBar/\n\n<$link to=\"$:/config/Navigation/UpdateAddressBar\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"permaview\"> <<lingo Permaview/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"permalink\"> <<lingo Permalink/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/NavigationHistory": {
            "title": "$:/core/ui/ControlPanel/Settings/NavigationHistory",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/NavigationHistory/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/NavigationHistory/\n<$link to=\"$:/config/Navigation/UpdateHistory\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateHistory\" value=\"yes\"> <<lingo Yes/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateHistory\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/PerformanceInstrumentation": {
            "title": "$:/core/ui/ControlPanel/Settings/PerformanceInstrumentation",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/PerformanceInstrumentation/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/Performance/Instrumentation\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"no\"> <$link to=\"$:/config/Performance/Instrumentation\"><<lingo Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/TitleLinks": {
            "title": "$:/core/ui/ControlPanel/Settings/TitleLinks",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/TitleLinks/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/TitleLinks/\n<$link to=\"$:/config/Tiddlers/TitleLinks\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Tiddlers/TitleLinks\" value=\"yes\"> <<lingo Yes/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Tiddlers/TitleLinks\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/ToolbarButtons": {
            "title": "$:/core/ui/ControlPanel/Settings/ToolbarButtons",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtons/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/ToolbarButtons/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/Toolbar/Icons\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/Toolbar/Icons\"><<lingo Icons/Description>></$link> </$checkbox>\n\n<$checkbox tiddler=\"$:/config/Toolbar/Text\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"no\"> <$link to=\"$:/config/Toolbar/Text\"><<lingo Text/Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/ToolbarButtonStyle": {
            "title": "$:/core/ui/ControlPanel/Settings/ToolbarButtonStyle",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/ToolbarButtonStyle/\n<$link to=\"$:/config/Toolbar/ButtonClass\"><<lingo \"Hint\">></$link>\n\n<$select tiddler=\"$:/config/Toolbar/ButtonClass\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ToolbarButtonStyle]]\">\n<option value={{!!text}}>{{!!caption}}</option>\n</$list>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings": {
            "title": "$:/core/ui/ControlPanel/Settings",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Settings/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/\n\n<<lingo Hint>>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Settings]]\">\n\n<div style=\"border-top:1px solid #eee;\">\n\n!! <$link><$transclude field=\"caption\"/></$link>\n\n<$transclude/>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/ControlPanel/StoryView": {
            "title": "$:/core/ui/ControlPanel/StoryView",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/StoryView/Caption}}",
            "text": "{{$:/snippets/viewswitcher}}\n"
        },
        "$:/core/ui/ControlPanel/Theme": {
            "title": "$:/core/ui/ControlPanel/Theme",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Theme/Caption}}",
            "text": "{{$:/snippets/themeswitcher}}\n"
        },
        "$:/core/ui/ControlPanel/TiddlerFields": {
            "title": "$:/core/ui/ControlPanel/TiddlerFields",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/TiddlerFields/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n\n<<lingo TiddlerFields/Hint>>\n\n{{$:/snippets/allfields}}"
        },
        "$:/core/ui/ControlPanel/Toolbars/EditorToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/EditorToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/EditorToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\n\\define config-title()\n$:/config/EditorToolbarButtons/Visibility/$(listItem)$\n\\end\n\n\\define toolbar-button()\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"> <$transclude tiddler={{$(listItem)$!!icon}}/> <$transclude tiddler=<<listItem>> field=\"caption\"/> -- <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i></$checkbox>\n\\end\n\n{{$:/language/ControlPanel/Toolbars/EditorToolbar/Hint}}\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<<toolbar-button>>\n\n</$list>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/EditToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/EditToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/EditToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/EditToolbarButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/EditToolbar/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/PageControls": {
            "title": "$:/core/ui/ControlPanel/Toolbars/PageControls",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/PageControls/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/PageControls/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/ViewToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/ViewToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/ViewToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/ViewToolbar/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars": {
            "title": "$:/core/ui/ControlPanel/Toolbars",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Toolbars/Caption}}",
            "text": "{{$:/language/ControlPanel/Toolbars/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Toolbars]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Toolbars/ViewToolbar\" \"$:/state/tabs/controlpanel/toolbars\" \"tc-vertical\">>\n</div>\n"
        },
        "$:/ControlPanel": {
            "title": "$:/ControlPanel",
            "icon": "$:/core/images/options-button",
            "color": "#bbb",
            "text": "<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Info\">>\n</div>\n"
        },
        "$:/core/ui/DefaultSearchResultList": {
            "title": "$:/core/ui/DefaultSearchResultList",
            "tags": "$:/tags/SearchResults",
            "caption": "{{$:/language/Search/DefaultResults/Caption}}",
            "text": "\\define searchResultList()\n//<small>{{$:/language/Search/Matches/Title}}</small>//\n\n<$list filter=\"[!is[system]search:title{$(searchTiddler)$}sort[title]limit[250]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n//<small>{{$:/language/Search/Matches/All}}</small>//\n\n<$list filter=\"[!is[system]search{$(searchTiddler)$}sort[title]limit[250]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n\\end\n<<searchResultList>>\n"
        },
        "$:/core/ui/EditorToolbar/bold": {
            "title": "$:/core/ui/EditorToolbar/bold",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/bold",
            "caption": "{{$:/language/Buttons/Bold/Caption}}",
            "description": "{{$:/language/Buttons/Bold/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((bold))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"''\"\n\tsuffix=\"''\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/clear-dropdown": {
            "title": "$:/core/ui/EditorToolbar/clear-dropdown",
            "text": "''{{$:/language/Buttons/Clear/Hint}}''\n\n<div class=\"tc-colour-chooser\">\n\n<$macrocall $name=\"colour-picker\" actions=\"\"\"\n\n<$action-sendmessage\n\t$message=\"tm-edit-bitmap-operation\"\n\t$param=\"clear\"\n\tcolour=<<colour-picker-value>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n\n</div>\n"
        },
        "$:/core/ui/EditorToolbar/clear": {
            "title": "$:/core/ui/EditorToolbar/clear",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/erase",
            "caption": "{{$:/language/Buttons/Clear/Caption}}",
            "description": "{{$:/language/Buttons/Clear/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/clear-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/editor-height-dropdown": {
            "title": "$:/core/ui/EditorToolbar/editor-height-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/EditorHeight/\n''<<lingo Hint>>''\n\n<$radio tiddler=\"$:/config/TextEditor/EditorHeight/Mode\" value=\"auto\"> {{$:/core/images/auto-height}} <<lingo Caption/Auto>></$radio>\n\n<$radio tiddler=\"$:/config/TextEditor/EditorHeight/Mode\" value=\"fixed\"> {{$:/core/images/fixed-height}} <<lingo Caption/Fixed>> <$edit-text tag=\"input\" tiddler=\"$:/config/TextEditor/EditorHeight/Height\" default=\"100px\"/></$radio>\n"
        },
        "$:/core/ui/EditorToolbar/editor-height": {
            "title": "$:/core/ui/EditorToolbar/editor-height",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/fixed-height",
            "custom-icon": "yes",
            "caption": "{{$:/language/Buttons/EditorHeight/Caption}}",
            "description": "{{$:/language/Buttons/EditorHeight/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/editor-height-dropdown",
            "text": "<$reveal tag=\"span\" state=\"$:/config/TextEditor/EditorHeight/Mode\" type=\"match\" text=\"fixed\">\n{{$:/core/images/fixed-height}}\n</$reveal>\n<$reveal tag=\"span\" state=\"$:/config/TextEditor/EditorHeight/Mode\" type=\"match\" text=\"auto\">\n{{$:/core/images/auto-height}}\n</$reveal>\n"
        },
        "$:/core/ui/EditorToolbar/excise-dropdown": {
            "title": "$:/core/ui/EditorToolbar/excise-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Excise/\n\n\\define body(config-title)\n''<<lingo Hint>>''\n\n<<lingo Caption/NewTitle>> <$edit-text tag=\"input\" tiddler=\"$config-title$/new-title\" default=\"\" focus=\"true\"/>\n\n<$set name=\"new-title\" value={{$config-title$/new-title}}>\n<$list filter=\"\"\"[<new-title>is[tiddler]]\"\"\">\n<div class=\"tc-error\">\n<<lingo Caption/TiddlerExists>>\n</div>\n</$list>\n</$set>\n\n<$checkbox tiddler=\"\"\"$config-title$/tagnew\"\"\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"false\"> <<lingo Caption/Tag>></$checkbox>\n\n<<lingo Caption/Replace>> <$select tiddler=\"\"\"$config-title$/type\"\"\" default=\"transclude\">\n<option value=\"link\"><<lingo Caption/Replace/Link>></option>\n<option value=\"transclude\"><<lingo Caption/Replace/Transclusion>></option>\n<option value=\"macro\"><<lingo Caption/Replace/Macro>></option>\n</$select>\n\n<$reveal state=\"\"\"$config-title$/type\"\"\" type=\"match\" text=\"macro\">\n<<lingo Caption/MacroName>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/macro-title\"\"\" default=\"translink\"/>\n</$reveal>\n\n<$button>\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"excise\"\n\ttitle={{$config-title$/new-title}}\n\ttype={{$config-title$/type}}\n\tmacro={{$config-title$/macro-title}}\n\ttagnew={{$config-title$/tagnew}}\n/>\n<$action-deletetiddler\n\t$tiddler=<<qualify \"$:/state/Excise/NewTitle\">>\n/>\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n<<lingo Caption/Excise>>\n</$button>\n\\end\n\n<$macrocall $name=\"body\" config-title=<<qualify \"$:/state/Excise/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/excise": {
            "title": "$:/core/ui/EditorToolbar/excise",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/excise",
            "caption": "{{$:/language/Buttons/Excise/Caption}}",
            "description": "{{$:/language/Buttons/Excise/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "shortcuts": "((excise))",
            "dropdown": "$:/core/ui/EditorToolbar/excise-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/heading-1": {
            "title": "$:/core/ui/EditorToolbar/heading-1",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-1",
            "caption": "{{$:/language/Buttons/Heading1/Caption}}",
            "description": "{{$:/language/Buttons/Heading1/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((heading-1))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-2": {
            "title": "$:/core/ui/EditorToolbar/heading-2",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-2",
            "caption": "{{$:/language/Buttons/Heading2/Caption}}",
            "description": "{{$:/language/Buttons/Heading2/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-2))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"2\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-3": {
            "title": "$:/core/ui/EditorToolbar/heading-3",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-3",
            "caption": "{{$:/language/Buttons/Heading3/Caption}}",
            "description": "{{$:/language/Buttons/Heading3/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-3))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"3\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-4": {
            "title": "$:/core/ui/EditorToolbar/heading-4",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-4",
            "caption": "{{$:/language/Buttons/Heading4/Caption}}",
            "description": "{{$:/language/Buttons/Heading4/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-4))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"4\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-5": {
            "title": "$:/core/ui/EditorToolbar/heading-5",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-5",
            "caption": "{{$:/language/Buttons/Heading5/Caption}}",
            "description": "{{$:/language/Buttons/Heading5/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-5))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"5\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-6": {
            "title": "$:/core/ui/EditorToolbar/heading-6",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-6",
            "caption": "{{$:/language/Buttons/Heading6/Caption}}",
            "description": "{{$:/language/Buttons/Heading6/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-6))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"6\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/italic": {
            "title": "$:/core/ui/EditorToolbar/italic",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/italic",
            "caption": "{{$:/language/Buttons/Italic/Caption}}",
            "description": "{{$:/language/Buttons/Italic/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((italic))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"//\"\n\tsuffix=\"//\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/line-width-dropdown": {
            "title": "$:/core/ui/EditorToolbar/line-width-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/LineWidth/\n\n\\define toolbar-line-width-inner()\n<$button tag=\"a\" tooltip=\"\"\"$(line-width)$\"\"\">\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/LineWidth\"\n\t$value=\"$(line-width)$\"\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<div style=\"display: inline-block; margin: 4px calc(80px - $(line-width)$); background-color: #000; width: calc(100px + $(line-width)$ * 2); height: $(line-width)$; border-radius: 120px; vertical-align: middle;\"/>\n\n<span style=\"margin-left: 8px;\">\n\n<$text text=\"\"\"$(line-width)$\"\"\"/>\n\n<$reveal state=\"$:/config/BitmapEditor/LineWidth\" type=\"match\" text=\"\"\"$(line-width)$\"\"\" tag=\"span\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</span>\n\n</$button>\n\\end\n\n''<<lingo Hint>>''\n\n<$list filter={{$:/config/BitmapEditor/LineWidths}} variable=\"line-width\">\n\n<<toolbar-line-width-inner>>\n\n</$list>\n"
        },
        "$:/core/ui/EditorToolbar/line-width": {
            "title": "$:/core/ui/EditorToolbar/line-width",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/line-width",
            "caption": "{{$:/language/Buttons/LineWidth/Caption}}",
            "description": "{{$:/language/Buttons/LineWidth/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/line-width-dropdown",
            "text": "<$text text={{$:/config/BitmapEditor/LineWidth}}/>"
        },
        "$:/core/ui/EditorToolbar/link-dropdown": {
            "title": "$:/core/ui/EditorToolbar/link-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Link/\n\n\\define link-actions()\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"make-link\"\n\ttext={{$(linkTiddler)$}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<searchTiddler>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<linkTiddler>>\n/>\n\\end\n\n\\define body(config-title)\n''<<lingo Hint>>''\n\n<$vars searchTiddler=\"\"\"$config-title$/search\"\"\" linkTiddler=\"\"\"$config-title$/link\"\"\">\n\n<$edit-text tiddler=<<searchTiddler>> type=\"search\" tag=\"input\" focus=\"true\" placeholder={{$:/language/Search/Search}} default=\"\"/>\n<$reveal tag=\"span\" state=<<searchTiddler>> type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\" style=\"width: auto; display: inline-block; background-colour: inherit;\">\n<$action-setfield $tiddler=<<searchTiddler>> text=\"\" />\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n\n<$reveal tag=\"div\" state=<<searchTiddler>> type=\"nomatch\" text=\"\">\n\n<$linkcatcher actions=<<link-actions>> to=<<linkTiddler>>>\n\n{{$:/core/ui/SearchResults}}\n\n</$linkcatcher>\n\n</$reveal>\n\n</$vars>\n\n\\end\n\n<$macrocall $name=\"body\" config-title=<<qualify \"$:/state/Link/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/link": {
            "title": "$:/core/ui/EditorToolbar/link",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/link",
            "caption": "{{$:/language/Buttons/Link/Caption}}",
            "description": "{{$:/language/Buttons/Link/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((link))",
            "dropdown": "$:/core/ui/EditorToolbar/link-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/list-bullet": {
            "title": "$:/core/ui/EditorToolbar/list-bullet",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/list-bullet",
            "caption": "{{$:/language/Buttons/ListBullet/Caption}}",
            "description": "{{$:/language/Buttons/ListBullet/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((list-bullet))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"*\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/list-number": {
            "title": "$:/core/ui/EditorToolbar/list-number",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/list-number",
            "caption": "{{$:/language/Buttons/ListNumber/Caption}}",
            "description": "{{$:/language/Buttons/ListNumber/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((list-number))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"#\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/mono-block": {
            "title": "$:/core/ui/EditorToolbar/mono-block",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/mono-block",
            "caption": "{{$:/language/Buttons/MonoBlock/Caption}}",
            "description": "{{$:/language/Buttons/MonoBlock/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((mono-block))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-lines\"\n\tprefix=\"\n```\"\n\tsuffix=\"```\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/mono-line": {
            "title": "$:/core/ui/EditorToolbar/mono-line",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/mono-line",
            "caption": "{{$:/language/Buttons/MonoLine/Caption}}",
            "description": "{{$:/language/Buttons/MonoLine/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((mono-line))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"`\"\n\tsuffix=\"`\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/more-dropdown": {
            "title": "$:/core/ui/EditorToolbar/more-dropdown",
            "text": "\\define config-title()\n$:/config/EditorToolbarButtons/Visibility/$(toolbarItem)$\n\\end\n\n\\define conditional-button()\n<$list filter={{$(toolbarItem)$!!condition}} variable=\"condition\">\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/toolbar/button\" mode=\"inline\"/> <$transclude tiddler=<<toolbarItem>> field=\"description\"/>\n</$list>\n\\end\n\n<div class=\"tc-text-editor-toolbar-more\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]] -[[$:/core/ui/EditorToolbar/more]]\">\n<$reveal type=\"match\" state=<<config-visibility-title>> text=\"hide\" tag=\"div\">\n<<conditional-button>>\n</$reveal>\n</$list>\n</div>\n"
        },
        "$:/core/ui/EditorToolbar/more": {
            "title": "$:/core/ui/EditorToolbar/more",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/down-arrow",
            "caption": "{{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "condition": "[<targetTiddler>]",
            "dropdown": "$:/core/ui/EditorToolbar/more-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/opacity-dropdown": {
            "title": "$:/core/ui/EditorToolbar/opacity-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Opacity/\n\n\\define toolbar-opacity-inner()\n<$button tag=\"a\" tooltip=\"\"\"$(opacity)$\"\"\">\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/Opacity\"\n\t$value=\"$(opacity)$\"\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<div style=\"display: inline-block; vertical-align: middle; background-color: $(current-paint-colour)$; opacity: $(opacity)$; width: 1em; height: 1em; border-radius: 50%;\"/>\n\n<span style=\"margin-left: 8px;\">\n\n<$text text=\"\"\"$(opacity)$\"\"\"/>\n\n<$reveal state=\"$:/config/BitmapEditor/Opacity\" type=\"match\" text=\"\"\"$(opacity)$\"\"\" tag=\"span\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</span>\n\n</$button>\n\\end\n\n\\define toolbar-opacity()\n''<<lingo Hint>>''\n\n<$list filter={{$:/config/BitmapEditor/Opacities}} variable=\"opacity\">\n\n<<toolbar-opacity-inner>>\n\n</$list>\n\\end\n\n<$set name=\"current-paint-colour\" value={{$:/config/BitmapEditor/Colour}}>\n\n<$set name=\"current-opacity\" value={{$:/config/BitmapEditor/Opacity}}>\n\n<<toolbar-opacity>>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/EditorToolbar/opacity": {
            "title": "$:/core/ui/EditorToolbar/opacity",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/opacity",
            "caption": "{{$:/language/Buttons/Opacity/Caption}}",
            "description": "{{$:/language/Buttons/Opacity/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/opacity-dropdown",
            "text": "<$text text={{$:/config/BitmapEditor/Opacity}}/>\n"
        },
        "$:/core/ui/EditorToolbar/paint-dropdown": {
            "title": "$:/core/ui/EditorToolbar/paint-dropdown",
            "text": "''{{$:/language/Buttons/Paint/Hint}}''\n\n<$macrocall $name=\"colour-picker\" actions=\"\"\"\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/Colour\"\n\t$value=<<colour-picker-value>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n"
        },
        "$:/core/ui/EditorToolbar/paint": {
            "title": "$:/core/ui/EditorToolbar/paint",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/paint",
            "caption": "{{$:/language/Buttons/Paint/Caption}}",
            "description": "{{$:/language/Buttons/Paint/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/paint-dropdown",
            "text": "\\define toolbar-paint()\n<div style=\"display: inline-block; vertical-align: middle; background-color: $(colour-picker-value)$; width: 1em; height: 1em; border-radius: 50%;\"/>\n\\end\n<$set name=\"colour-picker-value\" value={{$:/config/BitmapEditor/Colour}}>\n<<toolbar-paint>>\n</$set>\n"
        },
        "$:/core/ui/EditorToolbar/picture-dropdown": {
            "title": "$:/core/ui/EditorToolbar/picture-dropdown",
            "text": "\\define replacement-text()\n[img[$(imageTitle)$]]\n\\end\n\n''{{$:/language/Buttons/Picture/Hint}}''\n\n<$macrocall $name=\"image-picker\" actions=\"\"\"\n\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"replace-selection\"\n\ttext=<<replacement-text>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n"
        },
        "$:/core/ui/EditorToolbar/picture": {
            "title": "$:/core/ui/EditorToolbar/picture",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/picture",
            "caption": "{{$:/language/Buttons/Picture/Caption}}",
            "description": "{{$:/language/Buttons/Picture/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((picture))",
            "dropdown": "$:/core/ui/EditorToolbar/picture-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/preview-type-dropdown": {
            "title": "$:/core/ui/EditorToolbar/preview-type-dropdown",
            "text": "\\define preview-type-button()\n<$button tag=\"a\">\n\n<$action-setfield $tiddler=\"$:/state/editpreviewtype\" $value=\"$(previewType)$\"/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$transclude tiddler=<<previewType>> field=\"caption\" mode=\"inline\">\n\n<$view tiddler=<<previewType>> field=\"title\" mode=\"inline\"/>\n\n</$transclude> \n\n<$reveal tag=\"span\" state=\"$:/state/editpreviewtype\" type=\"match\" text=<<previewType>> default=\"$:/core/ui/EditTemplate/body/preview/output\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</$button>\n\\end\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditPreview]!has[draft.of]]\" variable=\"previewType\">\n\n<<preview-type-button>>\n\n</$list>\n"
        },
        "$:/core/ui/EditorToolbar/preview-type": {
            "title": "$:/core/ui/EditorToolbar/preview-type",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/chevron-down",
            "caption": "{{$:/language/Buttons/PreviewType/Caption}}",
            "description": "{{$:/language/Buttons/PreviewType/Hint}}",
            "condition": "[all[shadows+tiddlers]tag[$:/tags/EditPreview]!has[draft.of]butfirst[]limit[1]]",
            "button-classes": "tc-text-editor-toolbar-item-adjunct",
            "dropdown": "$:/core/ui/EditorToolbar/preview-type-dropdown"
        },
        "$:/core/ui/EditorToolbar/preview": {
            "title": "$:/core/ui/EditorToolbar/preview",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/preview-open",
            "custom-icon": "yes",
            "caption": "{{$:/language/Buttons/Preview/Caption}}",
            "description": "{{$:/language/Buttons/Preview/Hint}}",
            "condition": "[<targetTiddler>]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((preview))",
            "text": "<$reveal state=\"$:/state/showeditpreview\" type=\"match\" text=\"yes\" tag=\"span\">\n{{$:/core/images/preview-open}}\n<$action-setfield $tiddler=\"$:/state/showeditpreview\" $value=\"no\"/>\n</$reveal>\n<$reveal state=\"$:/state/showeditpreview\" type=\"nomatch\" text=\"yes\" tag=\"span\">\n{{$:/core/images/preview-closed}}\n<$action-setfield $tiddler=\"$:/state/showeditpreview\" $value=\"yes\"/>\n</$reveal>\n"
        },
        "$:/core/ui/EditorToolbar/quote": {
            "title": "$:/core/ui/EditorToolbar/quote",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/quote",
            "caption": "{{$:/language/Buttons/Quote/Caption}}",
            "description": "{{$:/language/Buttons/Quote/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((quote))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-lines\"\n\tprefix=\"\n<<<\"\n\tsuffix=\"<<<\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/size-dropdown": {
            "title": "$:/core/ui/EditorToolbar/size-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Size/\n\n\\define toolbar-button-size-preset(config-title)\n<$set name=\"width\" filter=\"$(sizePair)$ +[first[]]\">\n\n<$set name=\"height\" filter=\"$(sizePair)$ +[last[]]\">\n\n<$button tag=\"a\">\n\n<$action-setfield\n\t$tiddler=\"\"\"$config-title$/new-width\"\"\"\n\t$value=<<width>>\n/>\n\n<$action-setfield\n\t$tiddler=\"\"\"$config-title$/new-height\"\"\"\n\t$value=<<height>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/presets-popup\"\"\"\n/>\n\n<$text text=<<width>>/> &times; <$text text=<<height>>/>\n\n</$button>\n\n</$set>\n\n</$set>\n\\end\n\n\\define toolbar-button-size(config-title)\n''{{$:/language/Buttons/Size/Hint}}''\n\n<<lingo Caption/Width>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/new-width\"\"\" default=<<tv-bitmap-editor-width>> focus=\"true\" size=\"8\"/> <<lingo Caption/Height>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/new-height\"\"\" default=<<tv-bitmap-editor-height>> size=\"8\"/> <$button popup=\"\"\"$config-title$/presets-popup\"\"\" class=\"tc-btn-invisible tc-popup-keep\" style=\"width: auto; display: inline-block; background-colour: inherit;\" selectedClass=\"tc-selected\">\n{{$:/core/images/down-arrow}}\n</$button>\n\n<$reveal tag=\"span\" state=\"\"\"$config-title$/presets-popup\"\"\" type=\"popup\" position=\"belowleft\" animate=\"yes\">\n\n<div class=\"tc-drop-down tc-popup-keep\">\n\n<$list filter={{$:/config/BitmapEditor/ImageSizes}} variable=\"sizePair\">\n\n<$macrocall $name=\"toolbar-button-size-preset\" config-title=\"$config-title$\"/>\n\n</$list>\n\n</div>\n\n</$reveal>\n\n<$button>\n<$action-sendmessage\n\t$message=\"tm-edit-bitmap-operation\"\n\t$param=\"resize\"\n\twidth={{$config-title$/new-width}}\n\theight={{$config-title$/new-height}}\n/>\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/new-width\"\"\"\n/>\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/new-height\"\"\"\n/>\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n<<lingo Caption/Resize>>\n</$button>\n\\end\n\n<$macrocall $name=\"toolbar-button-size\" config-title=<<qualify \"$:/state/Size/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/size": {
            "title": "$:/core/ui/EditorToolbar/size",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/size",
            "caption": "{{$:/language/Buttons/Size/Caption}}",
            "description": "{{$:/language/Buttons/Size/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/size-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/stamp-dropdown": {
            "title": "$:/core/ui/EditorToolbar/stamp-dropdown",
            "text": "\\define toolbar-button-stamp-inner()\n<$button tag=\"a\">\n\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"replace-selection\"\n\ttext={{$(snippetTitle)$}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$view tiddler=<<snippetTitle>> field=\"caption\" mode=\"inline\">\n\n<$view tiddler=<<snippetTitle>> field=\"title\" mode=\"inline\"/>\n\n</$view>\n\n</$button>\n\\end\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TextEditor/Snippet]!has[draft.of]sort[caption]]\" variable=\"snippetTitle\">\n\n<<toolbar-button-stamp-inner>>\n\n</$list>\n\n----\n\n<$button tag=\"a\">\n\n<$action-sendmessage\n\t$message=\"tm-new-tiddler\"\n\ttags=\"$:/tags/TextEditor/Snippet\"\n\tcaption={{$:/language/Buttons/Stamp/New/Title}}\n\ttext={{$:/language/Buttons/Stamp/New/Text}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<em>\n\n<$text text={{$:/language/Buttons/Stamp/Caption/New}}/>\n\n</em>\n\n</$button>\n"
        },
        "$:/core/ui/EditorToolbar/stamp": {
            "title": "$:/core/ui/EditorToolbar/stamp",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/stamp",
            "caption": "{{$:/language/Buttons/Stamp/Caption}}",
            "description": "{{$:/language/Buttons/Stamp/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "shortcuts": "((stamp))",
            "dropdown": "$:/core/ui/EditorToolbar/stamp-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/strikethrough": {
            "title": "$:/core/ui/EditorToolbar/strikethrough",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/strikethrough",
            "caption": "{{$:/language/Buttons/Strikethrough/Caption}}",
            "description": "{{$:/language/Buttons/Strikethrough/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((strikethrough))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"~~\"\n\tsuffix=\"~~\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/subscript": {
            "title": "$:/core/ui/EditorToolbar/subscript",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/subscript",
            "caption": "{{$:/language/Buttons/Subscript/Caption}}",
            "description": "{{$:/language/Buttons/Subscript/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((subscript))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\",,\"\n\tsuffix=\",,\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/superscript": {
            "title": "$:/core/ui/EditorToolbar/superscript",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/superscript",
            "caption": "{{$:/language/Buttons/Superscript/Caption}}",
            "description": "{{$:/language/Buttons/Superscript/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((superscript))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"^^\"\n\tsuffix=\"^^\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/underline": {
            "title": "$:/core/ui/EditorToolbar/underline",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/underline",
            "caption": "{{$:/language/Buttons/Underline/Caption}}",
            "description": "{{$:/language/Buttons/Underline/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((underline))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"__\"\n\tsuffix=\"__\"\n/>\n"
        },
        "$:/core/ui/EditTemplate/body/editor": {
            "title": "$:/core/ui/EditTemplate/body/editor",
            "text": "<$edit\n\n  field=\"text\"\n  class=\"tc-edit-texteditor\"\n  placeholder={{$:/language/EditTemplate/Body/Placeholder}}\n\n><$set\n\n  name=\"targetTiddler\"\n  value=<<currentTiddler>>\n\n><$list\n\n  filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]]\"\n\n><$reveal\n\n  type=\"nomatch\"\n  state=<<config-visibility-title>>\n  text=\"hide\"\n  class=\"tc-text-editor-toolbar-item-wrapper\"\n\n><$transclude\n\n  tiddler=\"$:/core/ui/EditTemplate/body/toolbar/button\"\n  mode=\"inline\"\n\n/></$reveal></$list></$set></$edit>\n"
        },
        "$:/core/ui/EditTemplate/body/toolbar/button": {
            "title": "$:/core/ui/EditTemplate/body/toolbar/button",
            "text": "\\define toolbar-button-icon()\n<$list\n\n  filter=\"[all[current]!has[custom-icon]]\"\n  variable=\"no-custom-icon\"\n\n><$transclude\n\n  tiddler={{!!icon}}\n\n/></$list>\n\\end\n\n\\define toolbar-button-tooltip()\n{{!!description}}<$macrocall $name=\"displayshortcuts\" $output=\"text/plain\" shortcuts={{!!shortcuts}} prefix=\"` - [\" separator=\"] [\" suffix=\"]`\"/>\n\\end\n\n\\define toolbar-button()\n<$list\n\n  filter={{!!condition}}\n  variable=\"list-condition\"\n\n><$wikify\n\n  name=\"tooltip-text\"\n  text=<<toolbar-button-tooltip>>\n  mode=\"inline\"\n  output=\"text\"\n\n><$list\n\n  filter=\"[all[current]!has[dropdown]]\"\n  variable=\"no-dropdown\"\n\n><$button\n\n  class=\"tc-btn-invisible $(buttonClasses)$\"\n  tooltip=<<tooltip-text>>\n\n><span\n\n  data-tw-keyboard-shortcut={{!!shortcuts}}\n\n/><<toolbar-button-icon>><$transclude\n\n  tiddler=<<currentTiddler>>\n  field=\"text\"\n\n/></$button></$list><$list\n\n  filter=\"[all[current]has[dropdown]]\"\n  variable=\"dropdown\"\n\n><$set\n\n  name=\"dropdown-state\"\n  value=<<qualify \"$:/state/EditorToolbarDropdown\">>\n\n><$button\n\n  popup=<<dropdown-state>>\n  class=\"tc-popup-keep tc-btn-invisible $(buttonClasses)$\"\n  selectedClass=\"tc-selected\"\n  tooltip=<<tooltip-text>>\n\n><span\n\n  data-tw-keyboard-shortcut={{!!shortcuts}}\n\n/><<toolbar-button-icon>><$transclude\n\n  tiddler=<<currentTiddler>>\n  field=\"text\"\n\n/></$button><$reveal\n\n  state=<<dropdown-state>>\n  type=\"popup\"\n  position=\"below\"\n  animate=\"yes\"\n  tag=\"span\"\n\n><div\n\n  class=\"tc-drop-down tc-popup-keep\"\n\n><$transclude\n\n  tiddler={{!!dropdown}}\n  mode=\"block\"\n\n/></div></$reveal></$set></$list></$wikify></$list>\n\\end\n\n\\define toolbar-button-outer()\n<$set\n\n  name=\"buttonClasses\"\n  value={{!!button-classes}}\n\n><<toolbar-button>></$set>\n\\end\n\n<<toolbar-button-outer>>"
        },
        "$:/core/ui/EditTemplate/body": {
            "title": "$:/core/ui/EditTemplate/body",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/Body/\n\\define config-visibility-title()\n$:/config/EditorToolbarButtons/Visibility/$(currentTiddler)$\n\\end\n<$list filter=\"[is[current]has[_canonical_uri]]\">\n\n<div class=\"tc-message-box\">\n\n<<lingo External/Hint>>\n\n<a href={{!!_canonical_uri}}><$text text={{!!_canonical_uri}}/></a>\n\n<$edit-text field=\"_canonical_uri\" class=\"tc-edit-fields\"></$edit-text>\n\n</div>\n\n</$list>\n\n<$list filter=\"[is[current]!has[_canonical_uri]]\">\n\n<$reveal state=\"$:/state/showeditpreview\" type=\"match\" text=\"yes\">\n\n<div class=\"tc-tiddler-preview\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/editor\" mode=\"inline\"/>\n\n<div class=\"tc-tiddler-preview-preview\">\n\n<$transclude tiddler={{$:/state/editpreviewtype}} mode=\"inline\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/preview/output\" mode=\"inline\"/>\n\n</$transclude>\n\n</div>\n\n</div>\n\n</$reveal>\n\n<$reveal state=\"$:/state/showeditpreview\" type=\"nomatch\" text=\"yes\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/editor\" mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n"
        },
        "$:/core/ui/EditTemplate/controls": {
            "title": "$:/core/ui/EditTemplate/controls",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define config-title()\n$:/config/EditToolbarButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-tiddler-title tc-tiddler-edit-title\">\n<$view field=\"title\"/>\n<span class=\"tc-tiddler-controls tc-titlebar\"><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditToolbar]!has[draft.of]]\" variable=\"listItem\"><$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\"><$transclude tiddler=<<listItem>>/></$reveal></$list></span>\n<div style=\"clear: both;\"></div>\n</div>\n"
        },
        "$:/core/ui/EditTemplate/fields": {
            "title": "$:/core/ui/EditTemplate/fields",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n\\define config-title()\n$:/config/EditTemplateFields/Visibility/$(currentField)$\n\\end\n\n\\define config-filter()\n[[hide]] -[title{$(config-title)$}]\n\\end\n\n\\define new-field-inner()\n<$reveal type=\"nomatch\" text=\"\" default=<<name>>>\n<$button>\n<$action-sendmessage $message=\"tm-add-field\" $name=<<name>> $value=<<value>>/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldname\"/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldvalue\"/>\n<<lingo Fields/Add/Button>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" text=\"\" default=<<name>>>\n<$button>\n<<lingo Fields/Add/Button>>\n</$button>\n</$reveal>\n\\end\n\n\\define new-field()\n<$set name=\"name\" value={{$:/temp/newfieldname}}>\n<$set name=\"value\" value={{$:/temp/newfieldvalue}}>\n<<new-field-inner>>\n</$set>\n</$set>\n\\end\n\n<div class=\"tc-edit-fields\">\n<table class=\"tc-edit-fields\">\n<tbody>\n<$list filter=\"[all[current]fields[]] +[sort[title]]\" variable=\"currentField\">\n<$list filter=<<config-filter>> variable=\"temp\">\n<tr class=\"tc-edit-field\">\n<td class=\"tc-edit-field-name\">\n<$text text=<<currentField>>/>:</td>\n<td class=\"tc-edit-field-value\">\n<$edit-text tiddler=<<currentTiddler>> field=<<currentField>> placeholder={{$:/language/EditTemplate/Fields/Add/Value/Placeholder}}/>\n</td>\n<td class=\"tc-edit-field-remove\">\n<$button class=\"tc-btn-invisible\" tooltip={{$:/language/EditTemplate/Field/Remove/Hint}} aria-label={{$:/language/EditTemplate/Field/Remove/Caption}}>\n<$action-deletefield $field=<<currentField>>/>\n{{$:/core/images/delete-button}}\n</$button>\n</td>\n</tr>\n</$list>\n</$list>\n</tbody>\n</table>\n</div>\n\n<$fieldmangler>\n<div class=\"tc-edit-field-add\">\n<em class=\"tc-edit\">\n<<lingo Fields/Add/Prompt>>\n</em>\n<span class=\"tc-edit-field-add-name\">\n<$edit-text tiddler=\"$:/temp/newfieldname\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Fields/Add/Name/Placeholder}} focusPopup=<<qualify \"$:/state/popup/field-dropdown\">> class=\"tc-edit-texteditor tc-popup-handle\"/>\n</span>\n<$button popup=<<qualify \"$:/state/popup/field-dropdown\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Field/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Field/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/field-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$linkcatcher to=\"$:/temp/newfieldname\">\n<div class=\"tc-dropdown-item\">\n<<lingo Fields/Add/Dropdown/User>>\n</div>\n<$list filter=\"[!is[shadow]!is[system]fields[]sort[]] -created -creator -draft.of -draft.title -modified -modifier -tags -text -title -type\"  variable=\"currentField\">\n<$link to=<<currentField>>>\n<<currentField>>\n</$link>\n</$list>\n<div class=\"tc-dropdown-item\">\n<<lingo Fields/Add/Dropdown/System>>\n</div>\n<$list filter=\"[fields[]sort[]] -[!is[shadow]!is[system]fields[]]\" variable=\"currentField\">\n<$link to=<<currentField>>>\n<<currentField>>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n<span class=\"tc-edit-field-add-value\">\n<$edit-text tiddler=\"$:/temp/newfieldvalue\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Fields/Add/Value/Placeholder}} class=\"tc-edit-texteditor\"/>\n</span>\n<span class=\"tc-edit-field-add-button\">\n<$macrocall $name=\"new-field\"/>\n</span>\n</div>\n</$fieldmangler>\n\n"
        },
        "$:/core/ui/EditTemplate/body/preview/output": {
            "title": "$:/core/ui/EditTemplate/body/preview/output",
            "tags": "$:/tags/EditPreview",
            "caption": "{{$:/language/EditTemplate/Body/Preview/Type/Output}}",
            "text": "<$set name=\"tv-tiddler-preview\" value=\"yes\">\n\n<$transclude />\n\n</$set>\n"
        },
        "$:/core/ui/EditTemplate/shadow": {
            "title": "$:/core/ui/EditTemplate/shadow",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/Shadow/\n\\define pluginLinkBody()\n<$link to=\"\"\"$(pluginTitle)$\"\"\">\n<$text text=\"\"\"$(pluginTitle)$\"\"\"/>\n</$link>\n\\end\n<$list filter=\"[all[current]get[draft.of]is[shadow]!is[tiddler]]\">\n\n<$list filter=\"[all[current]shadowsource[]]\" variable=\"pluginTitle\">\n\n<$set name=\"pluginLink\" value=<<pluginLinkBody>>>\n<div class=\"tc-message-box\">\n\n<<lingo Warning>>\n\n</div>\n</$set>\n</$list>\n\n</$list>\n\n<$list filter=\"[all[current]get[draft.of]is[shadow]is[tiddler]]\">\n\n<$list filter=\"[all[current]shadowsource[]]\" variable=\"pluginTitle\">\n\n<$set name=\"pluginLink\" value=<<pluginLinkBody>>>\n<div class=\"tc-message-box\">\n\n<<lingo OverriddenWarning>>\n\n</div>\n</$set>\n</$list>\n\n</$list>"
        },
        "$:/core/ui/EditTemplate/tags": {
            "title": "$:/core/ui/EditTemplate/tags",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n\\define tag-styles()\nbackground-color:$(backgroundColor)$;\nfill:$(foregroundColor)$;\ncolor:$(foregroundColor)$;\n\\end\n\\define tag-body-inner(colour,fallbackTarget,colourA,colourB)\n<$vars foregroundColor=<<contrastcolour target:\"\"\"$colour$\"\"\" fallbackTarget:\"\"\"$fallbackTarget$\"\"\" colourA:\"\"\"$colourA$\"\"\" colourB:\"\"\"$colourB$\"\"\">> backgroundColor=\"\"\"$colour$\"\"\">\n<span style=<<tag-styles>> class=\"tc-tag-label\">\n<$view field=\"title\" format=\"text\" />\n<$button message=\"tm-remove-tag\" param={{!!title}} class=\"tc-btn-invisible tc-remove-tag-button\">&times;</$button>\n</span>\n</$vars>\n\\end\n\\define tag-body(colour,palette)\n<$macrocall $name=\"tag-body-inner\" colour=\"\"\"$colour$\"\"\" fallbackTarget={{$palette$##tag-background}} colourA={{$palette$##foreground}} colourB={{$palette$##background}}/>\n\\end\n<div class=\"tc-edit-tags\">\n<$fieldmangler>\n<$list filter=\"[all[current]tags[]sort[title]]\" storyview=\"pop\">\n<$macrocall $name=\"tag-body\" colour={{!!color}} palette={{$:/palette}}/>\n</$list>\n\n<div class=\"tc-edit-add-tag\">\n<span class=\"tc-add-tag-name\">\n<$edit-text tiddler=\"$:/temp/NewTagName\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Tags/Add/Placeholder}} focusPopup=<<qualify \"$:/state/popup/tags-auto-complete\">> class=\"tc-edit-texteditor tc-popup-handle\"/>\n</span> <$button popup=<<qualify \"$:/state/popup/tags-auto-complete\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Tags/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Tags/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button> <span class=\"tc-add-tag-button\">\n<$button message=\"tm-add-tag\" param={{$:/temp/NewTagName}} set=\"$:/temp/NewTagName\" setTo=\"\" class=\"\">\n<<lingo Tags/Add/Button>>\n</$button>\n</span>\n</div>\n\n<div class=\"tc-block-dropdown-wrapper\">\n<$reveal state=<<qualify \"$:/state/popup/tags-auto-complete\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown\">\n<$linkcatcher set=\"$:/temp/NewTagName\" setTo=\"\" message=\"tm-add-tag\">\n<$list filter=\"[tags[]!is[system]search:title{$:/temp/NewTagName}sort[]]\">\n{{||$:/core/ui/Components/tag-link}}\n</$list>\n<hr>\n<$list filter=\"[tags[]is[system]search:title{$:/temp/NewTagName}sort[]]\">\n{{||$:/core/ui/Components/tag-link}}\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>\n</$fieldmangler>\n</div>"
        },
        "$:/core/ui/EditTemplate/title": {
            "title": "$:/core/ui/EditTemplate/title",
            "tags": "$:/tags/EditTemplate",
            "text": "<$vars pattern=\"\"\"[\\|\\[\\]{}]\"\"\" bad-chars=\"\"\"`| [ ] { }`\"\"\">\n\n<$list filter=\"[is[current]regexp:draft.title<pattern>]\" variable=\"listItem\">\n\n<div class=\"tc-message-box\">\n\n{{$:/language/EditTemplate/Title/BadCharacterWarning}}\n\n</div>\n\n</$list>\n\n</$vars>\n\n<$edit-text field=\"draft.title\" class=\"tc-titlebar tc-edit-texteditor\" focus=\"true\"/>\n"
        },
        "$:/core/ui/EditTemplate/type": {
            "title": "$:/core/ui/EditTemplate/type",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n<div class=\"tc-type-selector\"><$fieldmangler>\n<em class=\"tc-edit\"><<lingo Type/Prompt>></em> <$edit-text field=\"type\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Type/Placeholder}} focusPopup=<<qualify \"$:/state/popup/type-dropdown\">> class=\"tc-edit-typeeditor tc-popup-handle\"/> <$button popup=<<qualify \"$:/state/popup/type-dropdown\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Type/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Type/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button> <$button message=\"tm-remove-field\" param=\"type\" class=\"tc-btn-invisible tc-btn-icon\" tooltip={{$:/language/EditTemplate/Type/Delete/Hint}} aria-label={{$:/language/EditTemplate/Type/Delete/Caption}}>{{$:/core/images/delete-button}}</$button>\n</$fieldmangler></div>\n\n<div class=\"tc-block-dropdown-wrapper\">\n<$reveal state=<<qualify \"$:/state/popup/type-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$linkcatcher to=\"!!type\">\n<$list filter='[all[shadows+tiddlers]prefix[$:/language/Docs/Types/]each[group]sort[group]]'>\n<div class=\"tc-dropdown-item\">\n<$text text={{!!group}}/>\n</div>\n<$list filter=\"[all[shadows+tiddlers]prefix[$:/language/Docs/Types/]group{!!group}] +[sort[description]]\"><$link to={{!!name}}><$view field=\"description\"/> (<$view field=\"name\"/>)</$link>\n</$list>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>"
        },
        "$:/core/ui/EditTemplate": {
            "title": "$:/core/ui/EditTemplate",
            "text": "\\define frame-classes()\ntc-tiddler-frame tc-tiddler-edit-frame $(missingTiddlerClass)$ $(shadowTiddlerClass)$ $(systemTiddlerClass)$\n\\end\n<div class=<<frame-classes>>>\n<$set name=\"storyTiddler\" value=<<currentTiddler>>>\n<$keyboard key=\"((cancel-edit-tiddler))\" message=\"tm-cancel-tiddler\">\n<$keyboard key=\"((save-tiddler))\" message=\"tm-save-tiddler\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditTemplate]!has[draft.of]]\" variable=\"listItem\">\n<$transclude tiddler=<<listItem>>/>\n</$list>\n</$keyboard>\n</$keyboard>\n</$set>\n</div>\n"
        },
        "$:/core/ui/Buttons/cancel": {
            "title": "$:/core/ui/Buttons/cancel",
            "tags": "$:/tags/EditToolbar",
            "caption": "{{$:/core/images/cancel-button}} {{$:/language/Buttons/Cancel/Caption}}",
            "description": "{{$:/language/Buttons/Cancel/Hint}}",
            "text": "<$button message=\"tm-cancel-tiddler\" tooltip={{$:/language/Buttons/Cancel/Hint}} aria-label={{$:/language/Buttons/Cancel/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/cancel-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Cancel/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/delete": {
            "title": "$:/core/ui/Buttons/delete",
            "tags": "$:/tags/EditToolbar $:/tags/ViewToolbar",
            "caption": "{{$:/core/images/delete-button}} {{$:/language/Buttons/Delete/Caption}}",
            "description": "{{$:/language/Buttons/Delete/Hint}}",
            "text": "<$button message=\"tm-delete-tiddler\" tooltip={{$:/language/Buttons/Delete/Hint}} aria-label={{$:/language/Buttons/Delete/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/delete-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Delete/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/save": {
            "title": "$:/core/ui/Buttons/save",
            "tags": "$:/tags/EditToolbar",
            "caption": "{{$:/core/images/done-button}} {{$:/language/Buttons/Save/Caption}}",
            "description": "{{$:/language/Buttons/Save/Hint}}",
            "text": "<$fieldmangler><$button tooltip={{$:/language/Buttons/Save/Hint}} aria-label={{$:/language/Buttons/Save/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-add-tag\" $param={{$:/temp/NewTagName}}/>\n<$action-deletetiddler $tiddler=\"$:/temp/NewTagName\"/>\n<$action-sendmessage $message=\"tm-add-field\" $name={{$:/temp/newfieldname}} $value={{$:/temp/newfieldvalue}}/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldname\"/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldvalue\"/>\n<$action-sendmessage $message=\"tm-save-tiddler\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/done-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Save/Caption}}/></span>\n</$list>\n</$button>\n</$fieldmangler>\n"
        },
        "$:/core/Filters/AllTags": {
            "title": "$:/core/Filters/AllTags",
            "tags": "$:/tags/Filter",
            "filter": "[tags[]!is[system]sort[title]]",
            "description": "{{$:/language/Filters/AllTags}}",
            "text": ""
        },
        "$:/core/Filters/AllTiddlers": {
            "title": "$:/core/Filters/AllTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]sort[title]]",
            "description": "{{$:/language/Filters/AllTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/Drafts": {
            "title": "$:/core/Filters/Drafts",
            "tags": "$:/tags/Filter",
            "filter": "[has[draft.of]sort[title]]",
            "description": "{{$:/language/Filters/Drafts}}",
            "text": ""
        },
        "$:/core/Filters/Missing": {
            "title": "$:/core/Filters/Missing",
            "tags": "$:/tags/Filter",
            "filter": "[all[missing]sort[title]]",
            "description": "{{$:/language/Filters/Missing}}",
            "text": ""
        },
        "$:/core/Filters/Orphans": {
            "title": "$:/core/Filters/Orphans",
            "tags": "$:/tags/Filter",
            "filter": "[all[orphans]sort[title]]",
            "description": "{{$:/language/Filters/Orphans}}",
            "text": ""
        },
        "$:/core/Filters/OverriddenShadowTiddlers": {
            "title": "$:/core/Filters/OverriddenShadowTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[is[shadow]]",
            "description": "{{$:/language/Filters/OverriddenShadowTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/RecentSystemTiddlers": {
            "title": "$:/core/Filters/RecentSystemTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[has[modified]!sort[modified]limit[50]]",
            "description": "{{$:/language/Filters/RecentSystemTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/RecentTiddlers": {
            "title": "$:/core/Filters/RecentTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]has[modified]!sort[modified]limit[50]]",
            "description": "{{$:/language/Filters/RecentTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/ShadowTiddlers": {
            "title": "$:/core/Filters/ShadowTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[all[shadows]sort[title]]",
            "description": "{{$:/language/Filters/ShadowTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/SystemTags": {
            "title": "$:/core/Filters/SystemTags",
            "tags": "$:/tags/Filter",
            "filter": "[all[shadows+tiddlers]tags[]is[system]sort[title]]",
            "description": "{{$:/language/Filters/SystemTags}}",
            "text": ""
        },
        "$:/core/Filters/SystemTiddlers": {
            "title": "$:/core/Filters/SystemTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[is[system]sort[title]]",
            "description": "{{$:/language/Filters/SystemTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/TypedTiddlers": {
            "title": "$:/core/Filters/TypedTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]has[type]each[type]sort[type]] -[type[text/vnd.tiddlywiki]]",
            "description": "{{$:/language/Filters/TypedTiddlers}}",
            "text": ""
        },
        "$:/core/ui/ImportListing": {
            "title": "$:/core/ui/ImportListing",
            "text": "\\define lingo-base() $:/language/Import/\n\\define messageField()\nmessage-$(payloadTiddler)$\n\\end\n\\define selectionField()\nselection-$(payloadTiddler)$\n\\end\n\\define previewPopupState()\n$(currentTiddler)$!!popup-$(payloadTiddler)$\n\\end\n<table>\n<tbody>\n<tr>\n<th>\n<<lingo Listing/Select/Caption>>\n</th>\n<th>\n<<lingo Listing/Title/Caption>>\n</th>\n<th>\n<<lingo Listing/Status/Caption>>\n</th>\n</tr>\n<$list filter=\"[all[current]plugintiddlers[]sort[title]]\" variable=\"payloadTiddler\">\n<tr>\n<td>\n<$checkbox field=<<selectionField>> checked=\"checked\" unchecked=\"unchecked\" default=\"checked\"/>\n</td>\n<td>\n<$reveal type=\"nomatch\" state=<<previewPopupState>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<previewPopupState>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}&nbsp;<$text text=<<payloadTiddler>>/>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<previewPopupState>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<previewPopupState>> setTo=\"no\">\n{{$:/core/images/down-arrow}}&nbsp;<$text text=<<payloadTiddler>>/>\n</$button>\n</$reveal>\n</td>\n<td>\n<$view field=<<messageField>>/>\n</td>\n</tr>\n<tr>\n<td colspan=\"3\">\n<$reveal type=\"match\" text=\"yes\" state=<<previewPopupState>>>\n<$transclude subtiddler=<<payloadTiddler>> mode=\"block\"/>\n</$reveal>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ListItemTemplate": {
            "title": "$:/core/ui/ListItemTemplate",
            "text": "<div class=\"tc-menu-list-item\">\n<$link to={{!!title}}>\n<$view field=\"title\"/>\n</$link>\n</div>"
        },
        "$:/core/ui/MissingTemplate": {
            "title": "$:/core/ui/MissingTemplate",
            "text": "<div class=\"tc-tiddler-missing\">\n<$button popup=<<qualify \"$:/state/popup/missing\">> class=\"tc-btn-invisible tc-missing-tiddler-label\">\n<$view field=\"title\" format=\"text\" />\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/missing\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$transclude tiddler=\"$:/core/ui/ListItemTemplate\"/>\n<hr>\n<$list filter=\"[all[current]backlinks[]sort[title]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$reveal>\n</div>\n"
        },
        "$:/core/ui/MoreSideBar/All": {
            "title": "$:/core/ui/MoreSideBar/All",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/All/Caption}}",
            "text": "<$list filter={{$:/core/Filters/AllTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Drafts": {
            "title": "$:/core/ui/MoreSideBar/Drafts",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Drafts/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Drafts!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Missing": {
            "title": "$:/core/ui/MoreSideBar/Missing",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Missing/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Missing!!filter}} template=\"$:/core/ui/MissingTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Orphans": {
            "title": "$:/core/ui/MoreSideBar/Orphans",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Orphans/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Orphans!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Recent": {
            "title": "$:/core/ui/MoreSideBar/Recent",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Recent/Caption}}",
            "text": "<$macrocall $name=\"timeline\" format={{$:/language/RecentChanges/DateFormat}}/>\n"
        },
        "$:/core/ui/MoreSideBar/Shadows": {
            "title": "$:/core/ui/MoreSideBar/Shadows",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Shadows/Caption}}",
            "text": "<$list filter={{$:/core/Filters/ShadowTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/System": {
            "title": "$:/core/ui/MoreSideBar/System",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/System/Caption}}",
            "text": "<$list filter={{$:/core/Filters/SystemTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Tags": {
            "title": "$:/core/ui/MoreSideBar/Tags",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Tags/Caption}}",
            "text": "<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n{{$:/core/ui/Buttons/tag-manager}}\n\n</$set>\n\n</$set>\n\n</$set>\n\n<$list filter={{$:/core/Filters/AllTags!!filter}}>\n\n<$transclude tiddler=\"$:/core/ui/TagTemplate\"/>\n\n</$list>\n\n<hr class=\"tc-untagged-separator\">\n\n{{$:/core/ui/UntaggedTemplate}}\n"
        },
        "$:/core/ui/MoreSideBar/Types": {
            "title": "$:/core/ui/MoreSideBar/Types",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Types/Caption}}",
            "text": "<$list filter={{$:/core/Filters/TypedTiddlers!!filter}}>\n<div class=\"tc-menu-list-item\">\n<$view field=\"type\"/>\n<$list filter=\"[type{!!type}!is[system]sort[title]]\">\n<div class=\"tc-menu-list-subitem\">\n<$link to={{!!title}}><$view field=\"title\"/></$link>\n</div>\n</$list>\n</div>\n</$list>\n"
        },
        "$:/core/ui/Buttons/advanced-search": {
            "title": "$:/core/ui/Buttons/advanced-search",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/advanced-search-button}} {{$:/language/Buttons/AdvancedSearch/Caption}}",
            "description": "{{$:/language/Buttons/AdvancedSearch/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/AdvancedSearch\" tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/advanced-search-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/AdvancedSearch/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/AdvancedSearch]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/close-all": {
            "title": "$:/core/ui/Buttons/close-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/close-all-button}} {{$:/language/Buttons/CloseAll/Caption}}",
            "description": "{{$:/language/Buttons/CloseAll/Hint}}",
            "text": "<$button message=\"tm-close-all-tiddlers\" tooltip={{$:/language/Buttons/CloseAll/Hint}} aria-label={{$:/language/Buttons/CloseAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/CloseAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/control-panel": {
            "title": "$:/core/ui/Buttons/control-panel",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/options-button}} {{$:/language/Buttons/ControlPanel/Caption}}",
            "description": "{{$:/language/Buttons/ControlPanel/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/ControlPanel\" tooltip={{$:/language/Buttons/ControlPanel/Hint}} aria-label={{$:/language/Buttons/ControlPanel/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/options-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/ControlPanel/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/ControlPanel]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/encryption": {
            "title": "$:/core/ui/Buttons/encryption",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/locked-padlock}} {{$:/language/Buttons/Encryption/Caption}}",
            "description": "{{$:/language/Buttons/Encryption/Hint}}",
            "text": "<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\n<$button message=\"tm-clear-password\" tooltip={{$:/language/Buttons/Encryption/ClearPassword/Hint}} aria-label={{$:/language/Buttons/Encryption/ClearPassword/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/locked-padlock}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Encryption/ClearPassword/Caption}}/></span>\n</$list>\n</$button>\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n<$button message=\"tm-set-password\" tooltip={{$:/language/Buttons/Encryption/SetPassword/Hint}} aria-label={{$:/language/Buttons/Encryption/SetPassword/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/unlocked-padlock}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Encryption/SetPassword/Caption}}/></span>\n</$list>\n</$button>\n</$reveal>"
        },
        "$:/core/ui/Buttons/export-page": {
            "title": "$:/core/ui/Buttons/export-page",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/export-button}} {{$:/language/Buttons/ExportPage/Caption}}",
            "description": "{{$:/language/Buttons/ExportPage/Hint}}",
            "text": "<$macrocall $name=\"exportButton\" exportFilter=\"[!is[system]sort[title]]\" lingoBase=\"$:/language/Buttons/ExportPage/\"/>"
        },
        "$:/core/ui/Buttons/fold-all": {
            "title": "$:/core/ui/Buttons/fold-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/fold-all-button}} {{$:/language/Buttons/FoldAll/Caption}}",
            "description": "{{$:/language/Buttons/FoldAll/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/FoldAll/Hint}} aria-label={{$:/language/Buttons/FoldAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-all-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FoldAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/full-screen": {
            "title": "$:/core/ui/Buttons/full-screen",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/full-screen-button}} {{$:/language/Buttons/FullScreen/Caption}}",
            "description": "{{$:/language/Buttons/FullScreen/Hint}}",
            "text": "<$button message=\"tm-full-screen\" tooltip={{$:/language/Buttons/FullScreen/Hint}} aria-label={{$:/language/Buttons/FullScreen/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/full-screen-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FullScreen/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/home": {
            "title": "$:/core/ui/Buttons/home",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/home-button}} {{$:/language/Buttons/Home/Caption}}",
            "description": "{{$:/language/Buttons/Home/Hint}}",
            "text": "<$button message=\"tm-home\" tooltip={{$:/language/Buttons/Home/Hint}} aria-label={{$:/language/Buttons/Home/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/home-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Home/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/import": {
            "title": "$:/core/ui/Buttons/import",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/import-button}} {{$:/language/Buttons/Import/Caption}}",
            "description": "{{$:/language/Buttons/Import/Hint}}",
            "text": "<div class=\"tc-file-input-wrapper\">\n<$button tooltip={{$:/language/Buttons/Import/Hint}} aria-label={{$:/language/Buttons/Import/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/import-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Import/Caption}}/></span>\n</$list>\n</$button>\n<$browse tooltip={{$:/language/Buttons/Import/Hint}}/>\n</div>"
        },
        "$:/core/ui/Buttons/language": {
            "title": "$:/core/ui/Buttons/language",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/globe}} {{$:/language/Buttons/Language/Caption}}",
            "description": "{{$:/language/Buttons/Language/Hint}}",
            "text": "\\define flag-title()\n$(languagePluginTitle)$/icon\n\\end\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/language\">> tooltip={{$:/language/Buttons/Language/Hint}} aria-label={{$:/language/Buttons/Language/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n<span class=\"tc-image-button\">\n<$set name=\"languagePluginTitle\" value={{$:/language}}>\n<$image source=<<flag-title>>/>\n</$set>\n</span>\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Language/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/language\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down tc-drop-down-language-chooser\">\n<$linkcatcher to=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[description]]\">\n<$link>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/language\" text=<<currentTiddler>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/language\" text=<<currentTiddler>>>\n&nbsp;\n</$reveal>\n</span>\n<span class=\"tc-image-button\">\n<$set name=\"languagePluginTitle\" value=<<currentTiddler>>>\n<$transclude subtiddler=<<flag-title>>>\n<$list filter=\"[all[current]field:title[$:/languages/en-GB]]\">\n<$transclude tiddler=\"$:/languages/en-GB/icon\"/>\n</$list>\n</$transclude>\n</$set>\n</span>\n<$view field=\"description\">\n<$view field=\"name\">\n<$view field=\"title\"/>\n</$view>\n</$view>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/more-page-actions": {
            "title": "$:/core/ui/Buttons/more-page-actions",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/down-arrow}} {{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "text": "\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n<$button popup=<<qualify \"$:/state/popup/more\">> tooltip={{$:/language/Buttons/More/Hint}} aria-label={{$:/language/Buttons/More/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/down-arrow}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/More/Caption}}/></span>\n</$list>\n</$button><$reveal state=<<qualify \"$:/state/popup/more\">> type=\"popup\" position=\"below\" animate=\"yes\">\n\n<div class=\"tc-drop-down\">\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"tc-btn-invisible\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]] -[[$:/core/ui/Buttons/more-page-actions]]\" variable=\"listItem\">\n\n<$reveal type=\"match\" state=<<config-title>> text=\"hide\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</div>\n\n</$reveal>"
        },
        "$:/core/ui/Buttons/new-image": {
            "title": "$:/core/ui/Buttons/new-image",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-image-button}} {{$:/language/Buttons/NewImage/Caption}}",
            "description": "{{$:/language/Buttons/NewImage/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/NewImage/Hint}} aria-label={{$:/language/Buttons/NewImage/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" type=\"image/jpeg\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-image-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewImage/Caption}}/></span>\n</$list>\n</$button>\n"
        },
        "$:/core/ui/Buttons/new-journal": {
            "title": "$:/core/ui/Buttons/new-journal",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-journal-button}} {{$:/language/Buttons/NewJournal/Caption}}",
            "description": "{{$:/language/Buttons/NewJournal/Hint}}",
            "text": "\\define journalButton()\n<$button tooltip={{$:/language/Buttons/NewJournal/Hint}} aria-label={{$:/language/Buttons/NewJournal/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" title=<<now \"$(journalTitleTemplate)$\">> tags=\"$(journalTags)$\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-journal-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewJournal/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<$set name=\"journalTitleTemplate\" value={{$:/config/NewJournal/Title}}>\n<$set name=\"journalTags\" value={{$:/config/NewJournal/Tags}}>\n<<journalButton>>\n</$set></$set>"
        },
        "$:/core/ui/Buttons/new-tiddler": {
            "title": "$:/core/ui/Buttons/new-tiddler",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-button}} {{$:/language/Buttons/NewTiddler/Caption}}",
            "description": "{{$:/language/Buttons/NewTiddler/Hint}}",
            "text": "<$button message=\"tm-new-tiddler\" tooltip={{$:/language/Buttons/NewTiddler/Hint}} aria-label={{$:/language/Buttons/NewTiddler/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewTiddler/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/palette": {
            "title": "$:/core/ui/Buttons/palette",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/palette}} {{$:/language/Buttons/Palette/Caption}}",
            "description": "{{$:/language/Buttons/Palette/Hint}}",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/palette\">> tooltip={{$:/language/Buttons/Palette/Hint}} aria-label={{$:/language/Buttons/Palette/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/palette}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Palette/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/palette\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\" style=\"font-size:0.7em;\">\n{{$:/snippets/paletteswitcher}}\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/refresh": {
            "title": "$:/core/ui/Buttons/refresh",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/refresh-button}} {{$:/language/Buttons/Refresh/Caption}}",
            "description": "{{$:/language/Buttons/Refresh/Hint}}",
            "text": "<$button message=\"tm-browser-refresh\" tooltip={{$:/language/Buttons/Refresh/Hint}} aria-label={{$:/language/Buttons/Refresh/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/refresh-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Refresh/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/save-wiki": {
            "title": "$:/core/ui/Buttons/save-wiki",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/save-button}} {{$:/language/Buttons/SaveWiki/Caption}}",
            "description": "{{$:/language/Buttons/SaveWiki/Hint}}",
            "text": "<$button message=\"tm-save-wiki\" param={{$:/config/SaveWikiButton/Template}} tooltip={{$:/language/Buttons/SaveWiki/Hint}} aria-label={{$:/language/Buttons/SaveWiki/Caption}} class=<<tv-config-toolbar-class>>>\n<span class=\"tc-dirty-indicator\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/save-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/SaveWiki/Caption}}/></span>\n</$list>\n</span>\n</$button>"
        },
        "$:/core/ui/Buttons/storyview": {
            "title": "$:/core/ui/Buttons/storyview",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/storyview-classic}} {{$:/language/Buttons/StoryView/Caption}}",
            "description": "{{$:/language/Buttons/StoryView/Hint}}",
            "text": "\\define icon()\n$:/core/images/storyview-$(storyview)$\n\\end\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/storyview\">> tooltip={{$:/language/Buttons/StoryView/Hint}} aria-label={{$:/language/Buttons/StoryView/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n<$set name=\"storyview\" value={{$:/view}}>\n<$transclude tiddler=<<icon>>/>\n</$set>\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/StoryView/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/storyview\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$:/view\">\n<$list filter=\"[storyviews[]]\" variable=\"storyview\">\n<$link to=<<storyview>>>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/view\" text=<<storyview>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/view\" text=<<storyview>>>\n&nbsp;\n</$reveal>\n</span>\n<$transclude tiddler=<<icon>>/>\n<$text text=<<storyview>>/></$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/tag-manager": {
            "title": "$:/core/ui/Buttons/tag-manager",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/tag-button}} {{$:/language/Buttons/TagManager/Caption}}",
            "description": "{{$:/language/Buttons/TagManager/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/TagManager\" tooltip={{$:/language/Buttons/TagManager/Hint}} aria-label={{$:/language/Buttons/TagManager/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/tag-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/TagManager/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/TagManager]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/theme": {
            "title": "$:/core/ui/Buttons/theme",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/theme-button}} {{$:/language/Buttons/Theme/Caption}}",
            "description": "{{$:/language/Buttons/Theme/Hint}}",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/theme\">> tooltip={{$:/language/Buttons/Theme/Hint}} aria-label={{$:/language/Buttons/Theme/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/theme-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Theme/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/theme\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\" variable=\"themeTitle\">\n<$link to=<<themeTitle>>>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/theme\" text=<<themeTitle>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/theme\" text=<<themeTitle>>>\n&nbsp;\n</$reveal>\n</span>\n<$view tiddler=<<themeTitle>> field=\"name\"/>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/unfold-all": {
            "title": "$:/core/ui/Buttons/unfold-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/unfold-all-button}} {{$:/language/Buttons/UnfoldAll/Caption}}",
            "description": "{{$:/language/Buttons/UnfoldAll/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/UnfoldAll/Hint}} aria-label={{$:/language/Buttons/UnfoldAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-unfold-all-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/unfold-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/UnfoldAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/PageTemplate/pagecontrols": {
            "title": "$:/core/ui/PageTemplate/pagecontrols",
            "text": "\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-page-controls\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n<$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\">\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n</$reveal>\n</$list>\n</div>\n\n"
        },
        "$:/core/ui/PageStylesheet": {
            "title": "$:/core/ui/PageStylesheet",
            "text": "<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Stylesheet]!has[draft.of]]\">\n<$transclude mode=\"block\"/>\n</$list>\n\n</$set>\n\n</$set>\n\n</$importvariables>\n"
        },
        "$:/core/ui/PageTemplate/alerts": {
            "title": "$:/core/ui/PageTemplate/alerts",
            "tags": "$:/tags/PageTemplate",
            "text": "<div class=\"tc-alerts\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Alert]!has[draft.of]]\" template=\"$:/core/ui/AlertTemplate\" storyview=\"pop\"/>\n\n</div>\n"
        },
        "$:/core/ui/PageTemplate/pluginreloadwarning": {
            "title": "$:/core/ui/PageTemplate/pluginreloadwarning",
            "tags": "$:/tags/PageTemplate",
            "text": "\\define lingo-base() $:/language/\n\n<$list filter=\"[has[plugin-type]haschanged[]!plugin-type[import]limit[1]]\">\n\n<$reveal type=\"nomatch\" state=\"$:/temp/HidePluginWarning\" text=\"yes\">\n\n<div class=\"tc-plugin-reload-warning\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<<lingo PluginReloadWarning>> <$button set=\"$:/temp/HidePluginWarning\" setTo=\"yes\" class=\"tc-btn-invisible\">{{$:/core/images/close-button}}</$button>\n\n</$set>\n\n</div>\n\n</$reveal>\n\n</$list>\n"
        },
        "$:/core/ui/PageTemplate/sidebar": {
            "title": "$:/core/ui/PageTemplate/sidebar",
            "tags": "$:/tags/PageTemplate",
            "text": "<$scrollable fallthrough=\"no\" class=\"tc-sidebar-scrollable\">\n\n<div class=\"tc-sidebar-header\">\n\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"yes\" default=\"yes\" retain=\"yes\" animate=\"yes\">\n\n<h1 class=\"tc-site-title\">\n\n<$transclude tiddler=\"$:/SiteTitle\" mode=\"inline\"/>\n\n</h1>\n\n<div class=\"tc-site-subtitle\">\n\n<$transclude tiddler=\"$:/SiteSubtitle\" mode=\"inline\"/>\n\n</div>\n\n{{||$:/core/ui/PageTemplate/pagecontrols}}\n\n<$transclude tiddler=\"$:/core/ui/SideBarLists\" mode=\"inline\"/>\n\n</$reveal>\n\n</div>\n\n</$scrollable>"
        },
        "$:/core/ui/PageTemplate/story": {
            "title": "$:/core/ui/PageTemplate/story",
            "tags": "$:/tags/PageTemplate",
            "text": "<section class=\"tc-story-river\">\n\n<section class=\"story-backdrop\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/AboveStory]!has[draft.of]]\">\n\n<$transclude/>\n\n</$list>\n\n</section>\n\n<$list filter=\"[list[$:/StoryList]]\" history=\"$:/HistoryList\" template=\"$:/core/ui/ViewTemplate\" editTemplate=\"$:/core/ui/EditTemplate\" storyview={{$:/view}} emptyMessage={{$:/config/EmptyStoryMessage}}/>\n\n<section class=\"story-frontdrop\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/BelowStory]!has[draft.of]]\">\n\n<$transclude/>\n\n</$list>\n\n</section>\n\n</section>\n"
        },
        "$:/core/ui/PageTemplate/topleftbar": {
            "title": "$:/core/ui/PageTemplate/topleftbar",
            "tags": "$:/tags/PageTemplate",
            "text": "<span class=\"tc-topbar tc-topbar-left\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TopLeftBar]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$list>\n\n</span>\n"
        },
        "$:/core/ui/PageTemplate/toprightbar": {
            "title": "$:/core/ui/PageTemplate/toprightbar",
            "tags": "$:/tags/PageTemplate",
            "text": "<span class=\"tc-topbar tc-topbar-right\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TopRightBar]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$list>\n\n</span>\n"
        },
        "$:/core/ui/PageTemplate": {
            "title": "$:/core/ui/PageTemplate",
            "text": "\\define containerClasses()\ntc-page-container tc-page-view-$(themeTitle)$ tc-language-$(languageTitle)$\n\\end\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$set name=\"tv-config-toolbar-icons\" value={{$:/config/Toolbar/Icons}}>\n\n<$set name=\"tv-config-toolbar-text\" value={{$:/config/Toolbar/Text}}>\n\n<$set name=\"tv-config-toolbar-class\" value={{$:/config/Toolbar/ButtonClass}}>\n\n<$set name=\"themeTitle\" value={{$:/view}}>\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$set name=\"currentTiddler\" value=\"\">\n\n<div class=<<containerClasses>>>\n\n<$navigator story=\"$:/StoryList\" history=\"$:/HistoryList\" openLinkFromInsideRiver={{$:/config/Navigation/openLinkFromInsideRiver}} openLinkFromOutsideRiver={{$:/config/Navigation/openLinkFromOutsideRiver}}>\n\n<$dropzone>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageTemplate]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>>/>\n\n</$list>\n\n</$dropzone>\n\n</$navigator>\n\n</div>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$importvariables>\n"
        },
        "$:/core/ui/PluginInfo": {
            "title": "$:/core/ui/PluginInfo",
            "text": "\\define localised-info-tiddler-title()\n$(currentTiddler)$/$(languageTitle)$/$(currentTab)$\n\\end\n\\define info-tiddler-title()\n$(currentTiddler)$/$(currentTab)$\n\\end\n<$transclude tiddler=<<localised-info-tiddler-title>> mode=\"block\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<localised-info-tiddler-title>> mode=\"block\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<info-tiddler-title>> mode=\"block\">\n{{$:/language/ControlPanel/Plugin/NoInfoFound/Hint}}\n</$transclude>\n</$transclude>\n</$transclude>\n"
        },
        "$:/core/ui/SearchResults": {
            "title": "$:/core/ui/SearchResults",
            "text": "<div class=\"tc-search-results\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]butfirst[]limit[1]]\" emptyMessage=\"\"\"\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\">\n<$transclude mode=\"block\"/>\n</$list>\n\"\"\">\n\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\" default={{$:/config/SearchResults/Default}}/>\n\n</$list>\n\n</div>\n"
        },
        "$:/core/ui/SideBar/More": {
            "title": "$:/core/ui/SideBar/More",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/More/Caption}}",
            "text": "<div class=\"tc-more-sidebar\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/MoreSideBar]!has[draft.of]]\" \"$:/core/ui/MoreSideBar/Tags\" \"$:/state/tab/moresidebar\" \"tc-vertical\">>\n</div>\n"
        },
        "$:/core/ui/SideBar/Open": {
            "title": "$:/core/ui/SideBar/Open",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Open/Caption}}",
            "text": "\\define lingo-base() $:/language/CloseAll/\n<$list filter=\"[list[$:/StoryList]]\" history=\"$:/HistoryList\" storyview=\"pop\">\n\n<$button message=\"tm-close-tiddler\" tooltip={{$:/language/Buttons/Close/Hint}} aria-label={{$:/language/Buttons/Close/Caption}} class=\"tc-btn-invisible tc-btn-mini\">&times;</$button> <$link to={{!!title}}><$view field=\"title\"/></$link>\n\n</$list>\n\n<$button message=\"tm-close-all-tiddlers\" class=\"tc-btn-invisible tc-btn-mini\"><<lingo Button>></$button>\n"
        },
        "$:/core/ui/SideBar/Recent": {
            "title": "$:/core/ui/SideBar/Recent",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Recent/Caption}}",
            "text": "<$macrocall $name=\"timeline\" format={{$:/language/RecentChanges/DateFormat}}/>\n"
        },
        "$:/core/ui/SideBar/Tools": {
            "title": "$:/core/ui/SideBar/Tools",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Tools/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n\n<<lingo Basics/Version/Prompt>> <<version>>\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n\n<div style=\"position:relative;\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>>/> <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</div>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/SideBarLists": {
            "title": "$:/core/ui/SideBarLists",
            "text": "<div class=\"tc-sidebar-lists\">\n\n<$set name=\"searchTiddler\" value=\"$:/temp/search\">\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/search\" type=\"search\" tag=\"input\" focus={{$:/config/Search/AutoFocus}} focusPopup=<<qualify \"$:/state/popup/search-dropdown\">> class=\"tc-popup-handle\"/>\n<$reveal state=\"$:/temp/search\" type=\"nomatch\" text=\"\">\n<$button tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" text={{$:/temp/search}}/>\n<$action-setfield $tiddler=\"$:/temp/search\" text=\"\"/>\n<$action-navigate $to=\"$:/AdvancedSearch\"/>\n{{$:/core/images/advanced-search-button}}\n</$button>\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/search\" text=\"\" />\n{{$:/core/images/close-button}}\n</$button>\n<$button popup=<<qualify \"$:/state/popup/search-dropdown\">> class=\"tc-btn-invisible\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[!is[system]search{$(searchTiddler)$}]\"/>\"\"\">\n{{$:/core/images/down-arrow}} {{$:/language/Search/Matches}}\n</$set>\n</$button>\n</$reveal>\n<$reveal state=\"$:/temp/search\" type=\"match\" text=\"\">\n<$button to=\"$:/AdvancedSearch\" tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"tc-btn-invisible\">\n{{$:/core/images/advanced-search-button}}\n</$button>\n</$reveal>\n</div>\n\n<$reveal tag=\"div\" class=\"tc-block-dropdown-wrapper\" state=\"$:/temp/search\" type=\"nomatch\" text=\"\">\n\n<$reveal tag=\"div\" class=\"tc-block-dropdown tc-search-drop-down tc-popup-handle\" state=<<qualify \"$:/state/popup/search-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n\n{{$:/core/ui/SearchResults}}\n\n</$reveal>\n\n</$reveal>\n\n</$set>\n\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SideBar]!has[draft.of]]\" default={{$:/config/DefaultSidebarTab}} state=\"$:/state/tab/sidebar\" />\n\n</div>\n"
        },
        "$:/TagManager": {
            "title": "$:/TagManager",
            "icon": "$:/core/images/tag-button",
            "color": "#bbb",
            "text": "\\define lingo-base() $:/language/TagManager/\n\\define iconEditorTab(type)\n<$list filter=\"[all[shadows+tiddlers]is[image]] [all[shadows+tiddlers]tag[$:/tags/Image]] -[type[application/pdf]] +[sort[title]] +[$type$is[system]]\">\n<$link to={{!!title}}>\n<$transclude/> <$view field=\"title\"/>\n</$link>\n</$list>\n\\end\n\\define iconEditor(title)\n<div class=\"tc-drop-down-wrapper\">\n<$button popup=<<qualify \"$:/state/popup/icon/$title$\">> class=\"tc-btn-invisible tc-btn-dropdown\">{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/icon/$title$\">> type=\"popup\" position=\"belowleft\" text=\"\" default=\"\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$title$!!icon\">\n<<iconEditorTab type:\"!\">>\n<hr/>\n<<iconEditorTab type:\"\">>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>\n\\end\n\\define qualifyTitle(title)\n$title$$(currentTiddler)$\n\\end\n\\define toggleButton(state)\n<$reveal state=\"$state$\" type=\"match\" text=\"closed\" default=\"closed\">\n<$button set=\"$state$\" setTo=\"open\" class=\"tc-btn-invisible tc-btn-dropdown\" selectedClass=\"tc-selected\">\n{{$:/core/images/info-button}}\n</$button>\n</$reveal>\n<$reveal state=\"$state$\" type=\"match\" text=\"open\" default=\"closed\">\n<$button set=\"$state$\" setTo=\"closed\" class=\"tc-btn-invisible tc-btn-dropdown\" selectedClass=\"tc-selected\">\n{{$:/core/images/info-button}}\n</$button>\n</$reveal>\n\\end\n<table class=\"tc-tag-manager-table\">\n<tbody>\n<tr>\n<th><<lingo Colour/Heading>></th>\n<th class=\"tc-tag-manager-tag\"><<lingo Tag/Heading>></th>\n<th><<lingo Count/Heading>></th>\n<th><<lingo Icon/Heading>></th>\n<th><<lingo Info/Heading>></th>\n</tr>\n<$list filter=\"[tags[]!is[system]sort[title]]\">\n<tr>\n<td><$edit-text field=\"color\" tag=\"input\" type=\"color\"/></td>\n<td><$transclude tiddler=\"$:/core/ui/TagTemplate\"/></td>\n<td><$count filter=\"[all[current]tagging[]]\"/></td>\n<td>\n<$macrocall $name=\"iconEditor\" title={{!!title}}/>\n</td>\n<td>\n<$macrocall $name=\"toggleButton\" state=<<qualifyTitle \"$:/state/tag-manager/\">> /> \n</td>\n</tr>\n<tr>\n<td></td>\n<td colspan=\"4\">\n<$reveal state=<<qualifyTitle \"$:/state/tag-manager/\">> type=\"match\" text=\"open\" default=\"\">\n<table>\n<tbody>\n<tr><td><<lingo Colour/Heading>></td><td><$edit-text field=\"color\" tag=\"input\" type=\"text\" size=\"9\"/></td></tr>\n<tr><td><<lingo Icon/Heading>></td><td><$edit-text field=\"icon\" tag=\"input\" size=\"45\"/></td></tr>\n</tbody>\n</table>\n</$reveal>\n</td>\n</tr>\n</$list>\n<tr>\n<td></td>\n<td>\n{{$:/core/ui/UntaggedTemplate}}\n</td>\n<td>\n<small class=\"tc-menu-list-count\"><$count filter=\"[untagged[]!is[system]] -[tags[]]\"/></small>\n</td>\n<td></td>\n<td></td>\n</tr>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/TagTemplate": {
            "title": "$:/core/ui/TagTemplate",
            "text": "\\define tag-styles()\nbackground-color:$(backgroundColor)$;\nfill:$(foregroundColor)$;\ncolor:$(foregroundColor)$;\n\\end\n\n\\define tag-body-inner(colour,fallbackTarget,colourA,colourB)\n<$vars foregroundColor=<<contrastcolour target:\"\"\"$colour$\"\"\" fallbackTarget:\"\"\"$fallbackTarget$\"\"\" colourA:\"\"\"$colourA$\"\"\" colourB:\"\"\"$colourB$\"\"\">> backgroundColor=\"\"\"$colour$\"\"\">\n<$button popup=<<qualify \"$:/state/popup/tag\">> class=\"tc-btn-invisible tc-tag-label\" style=<<tag-styles>>>\n<$transclude tiddler={{!!icon}}/> <$view field=\"title\" format=\"text\" />\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/tag\">> type=\"popup\" position=\"below\" animate=\"yes\" class=\"tc-drop-down\"><$transclude tiddler=\"$:/core/ui/ListItemTemplate\"/>\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TagDropdown]!has[draft.of]]\" variable=\"listItem\"> \n<$transclude tiddler=<<listItem>>/> \n</$list> \n<hr>\n<$list filter=\"[all[current]tagging[]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</$reveal>\n</$vars>\n\\end\n\n\\define tag-body(colour,palette)\n<span class=\"tc-tag-list-item\">\n<$macrocall $name=\"tag-body-inner\" colour=\"\"\"$colour$\"\"\" fallbackTarget={{$palette$##tag-background}} colourA={{$palette$##foreground}} colourB={{$palette$##background}}/>\n</span>\n\\end\n\n<$macrocall $name=\"tag-body\" colour={{!!color}} palette={{$:/palette}}/>\n"
        },
        "$:/core/ui/TiddlerFields": {
            "title": "$:/core/ui/TiddlerFields",
            "text": "<table class=\"tc-view-field-table\">\n<tbody>\n<$list filter=\"[all[current]fields[]sort[title]] -text\" template=\"$:/core/ui/TiddlerFieldTemplate\" variable=\"listItem\"/>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/TiddlerFieldTemplate": {
            "title": "$:/core/ui/TiddlerFieldTemplate",
            "text": "<tr class=\"tc-view-field\">\n<td class=\"tc-view-field-name\">\n<$text text=<<listItem>>/>\n</td>\n<td class=\"tc-view-field-value\">\n<$view field=<<listItem>>/>\n</td>\n</tr>"
        },
        "$:/core/ui/TiddlerInfo/Advanced/PluginInfo": {
            "title": "$:/core/ui/TiddlerInfo/Advanced/PluginInfo",
            "tags": "$:/tags/TiddlerInfo/Advanced",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/Advanced/PluginInfo/\n<$list filter=\"[all[current]has[plugin-type]]\">\n\n! <<lingo Heading>>\n\n<<lingo Hint>>\n<ul>\n<$list filter=\"[all[current]plugintiddlers[]sort[title]]\" emptyMessage=<<lingo Empty/Hint>>>\n<li>\n<$link to={{!!title}}>\n<$view field=\"title\"/>\n</$link>\n</li>\n</$list>\n</ul>\n\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Advanced/ShadowInfo": {
            "title": "$:/core/ui/TiddlerInfo/Advanced/ShadowInfo",
            "tags": "$:/tags/TiddlerInfo/Advanced",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/Advanced/ShadowInfo/\n<$set name=\"infoTiddler\" value=<<currentTiddler>>>\n\n''<<lingo Heading>>''\n\n<$list filter=\"[all[current]!is[shadow]]\">\n\n<<lingo NotShadow/Hint>>\n\n</$list>\n\n<$list filter=\"[all[current]is[shadow]]\">\n\n<<lingo Shadow/Hint>>\n\n<$list filter=\"[all[current]shadowsource[]]\">\n\n<$set name=\"pluginTiddler\" value=<<currentTiddler>>>\n<<lingo Shadow/Source>>\n</$set>\n\n</$list>\n\n<$list filter=\"[all[current]is[shadow]is[tiddler]]\">\n\n<<lingo OverriddenShadow/Hint>>\n\n</$list>\n\n\n</$list>\n</$set>\n"
        },
        "$:/core/ui/TiddlerInfo/Advanced": {
            "title": "$:/core/ui/TiddlerInfo/Advanced",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Advanced/Caption}}",
            "text": "<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TiddlerInfo/Advanced]!has[draft.of]]\" variable=\"listItem\">\n<$transclude tiddler=<<listItem>>/>\n\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Fields": {
            "title": "$:/core/ui/TiddlerInfo/Fields",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Fields/Caption}}",
            "text": "<$transclude tiddler=\"$:/core/ui/TiddlerFields\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/List": {
            "title": "$:/core/ui/TiddlerInfo/List",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/List/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[list{!!title}]\" emptyMessage=<<lingo List/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/Listed": {
            "title": "$:/core/ui/TiddlerInfo/Listed",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Listed/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]listed[]!is[system]]\" emptyMessage=<<lingo Listed/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/References": {
            "title": "$:/core/ui/TiddlerInfo/References",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/References/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]backlinks[]sort[title]]\" emptyMessage=<<lingo References/Empty>> template=\"$:/core/ui/ListItemTemplate\">\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Tagging": {
            "title": "$:/core/ui/TiddlerInfo/Tagging",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Tagging/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]tagging[]]\" emptyMessage=<<lingo Tagging/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/Tools": {
            "title": "$:/core/ui/TiddlerInfo/Tools",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Tools/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>>/> <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/TiddlerInfo": {
            "title": "$:/core/ui/TiddlerInfo",
            "text": "<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/TiddlerInfo]!has[draft.of]]\" default={{$:/config/TiddlerInfo/Default}}/>"
        },
        "$:/core/ui/TopBar/menu": {
            "title": "$:/core/ui/TopBar/menu",
            "tags": "$:/tags/TopRightBar",
            "text": "<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"no\">\n<$button set=\"$:/state/sidebar\" setTo=\"no\" tooltip={{$:/language/Buttons/HideSideBar/Hint}} aria-label={{$:/language/Buttons/HideSideBar/Caption}} class=\"tc-btn-invisible\">{{$:/core/images/chevron-right}}</$button>\n</$reveal>\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"no\">\n<$button set=\"$:/state/sidebar\" setTo=\"yes\" tooltip={{$:/language/Buttons/ShowSideBar/Hint}} aria-label={{$:/language/Buttons/ShowSideBar/Caption}} class=\"tc-btn-invisible\">{{$:/core/images/chevron-left}}</$button>\n</$reveal>\n"
        },
        "$:/core/ui/UntaggedTemplate": {
            "title": "$:/core/ui/UntaggedTemplate",
            "text": "\\define lingo-base() $:/language/SideBar/\n<$button popup=<<qualify \"$:/state/popup/tag\">> class=\"tc-btn-invisible tc-untagged-label tc-tag-label\">\n<<lingo Tags/Untagged/Caption>>\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/tag\">> type=\"popup\" position=\"below\">\n<div class=\"tc-drop-down\">\n<$list filter=\"[untagged[]!is[system]] -[tags[]] +[sort[title]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/body": {
            "title": "$:/core/ui/ViewTemplate/body",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal tag=\"div\" class=\"tc-tiddler-body\" type=\"nomatch\" state=<<folded-state>> text=\"hide\" retain=\"yes\" animate=\"yes\">\n\n<$list filter=\"[all[current]!has[plugin-type]!field:hide-body[yes]]\">\n\n<$transclude>\n\n<$transclude tiddler=\"$:/language/MissingTiddler/Hint\"/>\n\n</$transclude>\n\n</$list>\n\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/classic": {
            "title": "$:/core/ui/ViewTemplate/classic",
            "tags": "$:/tags/ViewTemplate $:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/ClassicWarning/\n<$list filter=\"[all[current]type[text/x-tiddlywiki]]\">\n<div class=\"tc-message-box\">\n\n<<lingo Hint>>\n\n<$button set=\"!!type\" setTo=\"text/vnd.tiddlywiki\"><<lingo Upgrade/Caption>></$button>\n\n</div>\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/import": {
            "title": "$:/core/ui/ViewTemplate/import",
            "tags": "$:/tags/ViewTemplate",
            "text": "\\define lingo-base() $:/language/Import/\n\n<$list filter=\"[all[current]field:plugin-type[import]]\">\n\n<div class=\"tc-import\">\n\n<<lingo Listing/Hint>>\n\n<$button message=\"tm-delete-tiddler\" param=<<currentTiddler>>><<lingo Listing/Cancel/Caption>></$button>\n<$button message=\"tm-perform-import\" param=<<currentTiddler>>><<lingo Listing/Import/Caption>></$button>\n\n{{||$:/core/ui/ImportListing}}\n\n<$button message=\"tm-delete-tiddler\" param=<<currentTiddler>>><<lingo Listing/Cancel/Caption>></$button>\n<$button message=\"tm-perform-import\" param=<<currentTiddler>>><<lingo Listing/Import/Caption>></$button>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/plugin": {
            "title": "$:/core/ui/ViewTemplate/plugin",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$list filter=\"[all[current]has[plugin-type]] -[all[current]field:plugin-type[import]]\">\n\n{{||$:/core/ui/TiddlerInfo/Advanced/PluginInfo}}\n\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/subtitle": {
            "title": "$:/core/ui/ViewTemplate/subtitle",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" tag=\"div\" retain=\"yes\" animate=\"yes\">\n<div class=\"tc-subtitle\">\n<$link to={{!!modifier}}>\n<$view field=\"modifier\"/>\n</$link> <$view field=\"modified\" format=\"date\" template={{$:/language/Tiddler/DateFormat}}/>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/tags": {
            "title": "$:/core/ui/ViewTemplate/tags",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" tag=\"div\" retain=\"yes\" animate=\"yes\">\n<div class=\"tc-tags-wrapper\"><$list filter=\"[all[current]tags[]sort[title]]\" template=\"$:/core/ui/TagTemplate\" storyview=\"pop\"/></div>\n</$reveal>"
        },
        "$:/core/ui/ViewTemplate/title": {
            "title": "$:/core/ui/ViewTemplate/title",
            "tags": "$:/tags/ViewTemplate",
            "text": "\\define title-styles()\nfill:$(foregroundColor)$;\n\\end\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-tiddler-title\">\n<div class=\"tc-titlebar\">\n<span class=\"tc-tiddler-controls\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\"><$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\"><$transclude tiddler=<<listItem>>/></$reveal></$list>\n</span>\n<$set name=\"tv-wikilinks\" value={{$:/config/Tiddlers/TitleLinks}}>\n<$link>\n<$set name=\"foregroundColor\" value={{!!color}}>\n<span class=\"tc-tiddler-title-icon\" style=<<title-styles>>>\n<$transclude tiddler={{!!icon}}/>\n</span>\n</$set>\n<$list filter=\"[all[current]removeprefix[$:/]]\">\n<h2 class=\"tc-title\" title={{$:/language/SystemTiddler/Tooltip}}>\n<span class=\"tc-system-title-prefix\">$:/</span><$text text=<<currentTiddler>>/>\n</h2>\n</$list>\n<$list filter=\"[all[current]!prefix[$:/]]\">\n<h2 class=\"tc-title\">\n<$view field=\"title\"/>\n</h2>\n</$list>\n</$link>\n</$set>\n</div>\n\n<$reveal type=\"nomatch\" text=\"\" default=\"\" state=<<tiddlerInfoState>> class=\"tc-tiddler-info tc-popup-handle\" animate=\"yes\" retain=\"yes\">\n\n<$transclude tiddler=\"$:/core/ui/TiddlerInfo\"/>\n\n</$reveal>\n</div>"
        },
        "$:/core/ui/ViewTemplate/unfold": {
            "title": "$:/core/ui/ViewTemplate/unfold",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal tag=\"div\" type=\"nomatch\" state=\"$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar\" text=\"hide\">\n<$reveal tag=\"div\" type=\"nomatch\" state=<<folded-state>> text=\"hide\" default=\"show\" retain=\"yes\" animate=\"yes\">\n<$button tooltip={{$:/language/Buttons/Fold/Hint}} aria-label={{$:/language/Buttons/Fold/Caption}} class=\"tc-fold-banner\">\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n{{$:/core/images/chevron-up}}\n</$button>\n</$reveal>\n<$reveal tag=\"div\" type=\"nomatch\" state=<<folded-state>> text=\"show\" default=\"show\" retain=\"yes\" animate=\"yes\">\n<$button tooltip={{$:/language/Buttons/Unfold/Hint}} aria-label={{$:/language/Buttons/Unfold/Caption}} class=\"tc-unfold-banner\">\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n{{$:/core/images/chevron-down}}\n</$button>\n</$reveal>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate": {
            "title": "$:/core/ui/ViewTemplate",
            "text": "\\define frame-classes()\ntc-tiddler-frame tc-tiddler-view-frame $(missingTiddlerClass)$ $(shadowTiddlerClass)$ $(systemTiddlerClass)$ $(tiddlerTagClasses)$\n\\end\n\\define folded-state()\n$:/state/folded/$(currentTiddler)$\n\\end\n<$set name=\"storyTiddler\" value=<<currentTiddler>>><$set name=\"tiddlerInfoState\" value=<<qualify \"$:/state/popup/tiddler-info\">>><$tiddler tiddler=<<currentTiddler>>><div class=<<frame-classes>>><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewTemplate]!has[draft.of]]\" variable=\"listItem\"><$transclude tiddler=<<listItem>>/></$list>\n</div>\n</$tiddler></$set></$set>\n"
        },
        "$:/core/ui/Buttons/clone": {
            "title": "$:/core/ui/Buttons/clone",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/clone-button}} {{$:/language/Buttons/Clone/Caption}}",
            "description": "{{$:/language/Buttons/Clone/Hint}}",
            "text": "<$button message=\"tm-new-tiddler\" param=<<currentTiddler>> tooltip={{$:/language/Buttons/Clone/Hint}} aria-label={{$:/language/Buttons/Clone/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/clone-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Clone/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/close-others": {
            "title": "$:/core/ui/Buttons/close-others",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/close-others-button}} {{$:/language/Buttons/CloseOthers/Caption}}",
            "description": "{{$:/language/Buttons/CloseOthers/Hint}}",
            "text": "<$button message=\"tm-close-other-tiddlers\" param=<<currentTiddler>> tooltip={{$:/language/Buttons/CloseOthers/Hint}} aria-label={{$:/language/Buttons/CloseOthers/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-others-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/CloseOthers/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/close": {
            "title": "$:/core/ui/Buttons/close",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/close-button}} {{$:/language/Buttons/Close/Caption}}",
            "description": "{{$:/language/Buttons/Close/Hint}}",
            "text": "<$button message=\"tm-close-tiddler\" tooltip={{$:/language/Buttons/Close/Hint}} aria-label={{$:/language/Buttons/Close/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Close/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/edit": {
            "title": "$:/core/ui/Buttons/edit",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/edit-button}} {{$:/language/Buttons/Edit/Caption}}",
            "description": "{{$:/language/Buttons/Edit/Hint}}",
            "text": "<$button message=\"tm-edit-tiddler\" tooltip={{$:/language/Buttons/Edit/Hint}} aria-label={{$:/language/Buttons/Edit/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/edit-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Edit/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/export-tiddler": {
            "title": "$:/core/ui/Buttons/export-tiddler",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/export-button}} {{$:/language/Buttons/ExportTiddler/Caption}}",
            "description": "{{$:/language/Buttons/ExportTiddler/Hint}}",
            "text": "\\define makeExportFilter()\n[[$(currentTiddler)$]]\n\\end\n<$macrocall $name=\"exportButton\" exportFilter=<<makeExportFilter>> lingoBase=\"$:/language/Buttons/ExportTiddler/\" baseFilename=<<currentTiddler>>/>"
        },
        "$:/core/ui/Buttons/fold-bar": {
            "title": "$:/core/ui/Buttons/fold-bar",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/language/Buttons/Fold/FoldBar/Caption}}",
            "description": "{{$:/language/Buttons/Fold/FoldBar/Hint}}",
            "text": "<!-- This dummy toolbar button is here to allow visibility of the fold-bar to be controlled as if it were a toolbar button -->"
        },
        "$:/core/ui/Buttons/fold-others": {
            "title": "$:/core/ui/Buttons/fold-others",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/fold-others-button}} {{$:/language/Buttons/FoldOthers/Caption}}",
            "description": "{{$:/language/Buttons/FoldOthers/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/FoldOthers/Hint}} aria-label={{$:/language/Buttons/FoldOthers/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-other-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-others-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FoldOthers/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/fold": {
            "title": "$:/core/ui/Buttons/fold",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/fold-button}} {{$:/language/Buttons/Fold/Caption}}",
            "description": "{{$:/language/Buttons/Fold/Hint}}",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" default=\"show\"><$button tooltip={{$:/language/Buttons/Fold/Hint}} aria-label={{$:/language/Buttons/Fold/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\">\n<$text text={{$:/language/Buttons/Fold/Caption}}/>\n</span>\n</$list>\n</$button></$reveal><$reveal type=\"match\" state=<<folded-state>> text=\"hide\" default=\"show\"><$button tooltip={{$:/language/Buttons/Unfold/Hint}} aria-label={{$:/language/Buttons/Unfold/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/unfold-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\">\n<$text text={{$:/language/Buttons/Unfold/Caption}}/>\n</span>\n</$list>\n</$button></$reveal>"
        },
        "$:/core/ui/Buttons/info": {
            "title": "$:/core/ui/Buttons/info",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/info-button}} {{$:/language/Buttons/Info/Caption}}",
            "description": "{{$:/language/Buttons/Info/Hint}}",
            "text": "<$button popup=<<tiddlerInfoState>> tooltip={{$:/language/Buttons/Info/Hint}} aria-label={{$:/language/Buttons/Info/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/info-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Info/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/more-tiddler-actions": {
            "title": "$:/core/ui/Buttons/more-tiddler-actions",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/down-arrow}} {{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "text": "\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<$button popup=<<qualify \"$:/state/popup/more\">> tooltip={{$:/language/Buttons/More/Hint}} aria-label={{$:/language/Buttons/More/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/down-arrow}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/More/Caption}}/></span>\n</$list>\n</$button><$reveal state=<<qualify \"$:/state/popup/more\">> type=\"popup\" position=\"below\" animate=\"yes\">\n\n<div class=\"tc-drop-down\">\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"tc-btn-invisible\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]] -[[$:/core/ui/Buttons/more-tiddler-actions]]\" variable=\"listItem\">\n\n<$reveal type=\"match\" state=<<config-title>> text=\"hide\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</div>\n\n</$reveal>"
        },
        "$:/core/ui/Buttons/new-here": {
            "title": "$:/core/ui/Buttons/new-here",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/new-here-button}} {{$:/language/Buttons/NewHere/Caption}}",
            "description": "{{$:/language/Buttons/NewHere/Hint}}",
            "text": "\\define newHereButtonTags()\n[[$(currentTiddler)$]]\n\\end\n\\define newHereButton()\n<$button tooltip={{$:/language/Buttons/NewHere/Hint}} aria-label={{$:/language/Buttons/NewHere/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" tags=<<newHereButtonTags>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-here-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewHere/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<<newHereButton>>"
        },
        "$:/core/ui/Buttons/new-journal-here": {
            "title": "$:/core/ui/Buttons/new-journal-here",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/new-journal-button}} {{$:/language/Buttons/NewJournalHere/Caption}}",
            "description": "{{$:/language/Buttons/NewJournalHere/Hint}}",
            "text": "\\define journalButtonTags()\n[[$(currentTiddlerTag)$]] $(journalTags)$\n\\end\n\\define journalButton()\n<$button tooltip={{$:/language/Buttons/NewJournalHere/Hint}} aria-label={{$:/language/Buttons/NewJournalHere/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" title=<<now \"$(journalTitleTemplate)$\">> tags=<<journalButtonTags>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-journal-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewJournalHere/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<$set name=\"journalTitleTemplate\" value={{$:/config/NewJournal/Title}}>\n<$set name=\"journalTags\" value={{$:/config/NewJournal/Tags}}>\n<$set name=\"currentTiddlerTag\" value=<<currentTiddler>>>\n<<journalButton>>\n</$set></$set></$set>"
        },
        "$:/core/ui/Buttons/open-window": {
            "title": "$:/core/ui/Buttons/open-window",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/open-window}} {{$:/language/Buttons/OpenWindow/Caption}}",
            "description": "{{$:/language/Buttons/OpenWindow/Hint}}",
            "text": "<$button message=\"tm-open-window\" tooltip={{$:/language/Buttons/OpenWindow/Hint}} aria-label={{$:/language/Buttons/OpenWindow/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/open-window}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/OpenWindow/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/permalink": {
            "title": "$:/core/ui/Buttons/permalink",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/permalink-button}} {{$:/language/Buttons/Permalink/Caption}}",
            "description": "{{$:/language/Buttons/Permalink/Hint}}",
            "text": "<$button message=\"tm-permalink\" tooltip={{$:/language/Buttons/Permalink/Hint}} aria-label={{$:/language/Buttons/Permalink/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/permalink-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Permalink/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/permaview": {
            "title": "$:/core/ui/Buttons/permaview",
            "tags": "$:/tags/ViewToolbar $:/tags/PageControls",
            "caption": "{{$:/core/images/permaview-button}} {{$:/language/Buttons/Permaview/Caption}}",
            "description": "{{$:/language/Buttons/Permaview/Hint}}",
            "text": "<$button message=\"tm-permaview\" tooltip={{$:/language/Buttons/Permaview/Hint}} aria-label={{$:/language/Buttons/Permaview/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/permaview-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Permaview/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/temp/advancedsearch": {
            "title": "$:/temp/advancedsearch",
            "text": ""
        },
        "$:/snippets/allfields": {
            "title": "$:/snippets/allfields",
            "text": "\\define renderfield(title)\n<tr class=\"tc-view-field\"><td class=\"tc-view-field-name\">''$title$'':</td><td class=\"tc-view-field-value\">//{{$:/language/Docs/Fields/$title$}}//</td></tr>\n\\end\n<table class=\"tc-view-field-table\"><tbody><$list filter=\"[fields[]sort[title]]\" variable=\"listItem\"><$macrocall $name=\"renderfield\" title=<<listItem>>/></$list>\n</tbody></table>\n"
        },
        "$:/config/AnimationDuration": {
            "title": "$:/config/AnimationDuration",
            "text": "400"
        },
        "$:/config/AutoSave": {
            "title": "$:/config/AutoSave",
            "text": "yes"
        },
        "$:/config/BitmapEditor/Colour": {
            "title": "$:/config/BitmapEditor/Colour",
            "text": "#444"
        },
        "$:/config/BitmapEditor/ImageSizes": {
            "title": "$:/config/BitmapEditor/ImageSizes",
            "text": "[[62px 100px]] [[100px 62px]] [[124px 200px]] [[200px 124px]] [[248px 400px]] [[371px 600px]] [[400px 248px]] [[556px 900px]] [[600px 371px]] [[742px 1200px]] [[900px 556px]] [[1200px 742px]]"
        },
        "$:/config/BitmapEditor/LineWidth": {
            "title": "$:/config/BitmapEditor/LineWidth",
            "text": "3px"
        },
        "$:/config/BitmapEditor/LineWidths": {
            "title": "$:/config/BitmapEditor/LineWidths",
            "text": "0.25px 0.5px 1px 2px 3px 4px 6px 8px 10px 16px 20px 28px 40px 56px 80px"
        },
        "$:/config/BitmapEditor/Opacities": {
            "title": "$:/config/BitmapEditor/Opacities",
            "text": "0.01 0.025 0.05 0.075 0.1 0.15 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0"
        },
        "$:/config/BitmapEditor/Opacity": {
            "title": "$:/config/BitmapEditor/Opacity",
            "text": "1.0"
        },
        "$:/config/DefaultSidebarTab": {
            "title": "$:/config/DefaultSidebarTab",
            "text": "$:/core/ui/SideBar/Open"
        },
        "$:/config/Drafts/TypingTimeout": {
            "title": "$:/config/Drafts/TypingTimeout",
            "text": "400"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-4": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-4",
            "text": "hide"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-5": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-5",
            "text": "hide"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-6": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-6",
            "text": "hide"
        },
        "$:/config/EditorTypeMappings/image/gif": {
            "title": "$:/config/EditorTypeMappings/image/gif",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/jpeg": {
            "title": "$:/config/EditorTypeMappings/image/jpeg",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/jpg": {
            "title": "$:/config/EditorTypeMappings/image/jpg",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/png": {
            "title": "$:/config/EditorTypeMappings/image/png",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/x-icon": {
            "title": "$:/config/EditorTypeMappings/image/x-icon",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/text/vnd.tiddlywiki": {
            "title": "$:/config/EditorTypeMappings/text/vnd.tiddlywiki",
            "text": "text"
        },
        "$:/config/EditTemplateFields/Visibility/title": {
            "title": "$:/config/EditTemplateFields/Visibility/title",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/tags": {
            "title": "$:/config/EditTemplateFields/Visibility/tags",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/text": {
            "title": "$:/config/EditTemplateFields/Visibility/text",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/creator": {
            "title": "$:/config/EditTemplateFields/Visibility/creator",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/created": {
            "title": "$:/config/EditTemplateFields/Visibility/created",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/modified": {
            "title": "$:/config/EditTemplateFields/Visibility/modified",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/modifier": {
            "title": "$:/config/EditTemplateFields/Visibility/modifier",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/type": {
            "title": "$:/config/EditTemplateFields/Visibility/type",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/draft.title": {
            "title": "$:/config/EditTemplateFields/Visibility/draft.title",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/draft.of": {
            "title": "$:/config/EditTemplateFields/Visibility/draft.of",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/revision": {
            "title": "$:/config/EditTemplateFields/Visibility/revision",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/bag": {
            "title": "$:/config/EditTemplateFields/Visibility/bag",
            "text": "hide"
        },
        "$:/config/MissingLinks": {
            "title": "$:/config/MissingLinks",
            "text": "yes"
        },
        "$:/config/Navigation/UpdateAddressBar": {
            "title": "$:/config/Navigation/UpdateAddressBar",
            "text": "no"
        },
        "$:/config/Navigation/UpdateHistory": {
            "title": "$:/config/Navigation/UpdateHistory",
            "text": "no"
        },
        "$:/config/OfficialPluginLibrary": {
            "title": "$:/config/OfficialPluginLibrary",
            "tags": "$:/tags/PluginLibrary",
            "url": "http://tiddlywiki.com/library/v5.1.13/index.html",
            "caption": "{{$:/language/OfficialPluginLibrary}}",
            "text": "{{$:/language/OfficialPluginLibrary/Hint}}\n"
        },
        "$:/config/Navigation/openLinkFromInsideRiver": {
            "title": "$:/config/Navigation/openLinkFromInsideRiver",
            "text": "below"
        },
        "$:/config/Navigation/openLinkFromOutsideRiver": {
            "title": "$:/config/Navigation/openLinkFromOutsideRiver",
            "text": "top"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/advanced-search": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/advanced-search",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/close-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/close-all",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/encryption": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/encryption",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/export-page": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/export-page",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/fold-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/fold-all",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/full-screen": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/full-screen",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/refresh": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/refresh",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/import": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/import",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/language": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/language",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/tag-manager": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/tag-manager",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/more-page-actions": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/more-page-actions",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-journal": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-journal",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-image": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-image",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/palette": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/palette",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/permaview": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/permaview",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/storyview": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/storyview",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/theme": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/theme",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/unfold-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/unfold-all",
            "text": "hide"
        },
        "$:/config/Performance/Instrumentation": {
            "title": "$:/config/Performance/Instrumentation",
            "text": "no"
        },
        "$:/config/SaverFilter": {
            "title": "$:/config/SaverFilter",
            "text": "[all[]] -[[$:/HistoryList]] -[[$:/StoryList]] -[[$:/Import]] -[[$:/isEncrypted]] -[[$:/UploadName]] -[prefix[$:/state/]] -[prefix[$:/temp/]]"
        },
        "$:/config/SaveWikiButton/Template": {
            "title": "$:/config/SaveWikiButton/Template",
            "text": "$:/core/save/all"
        },
        "$:/config/Search/AutoFocus": {
            "title": "$:/config/Search/AutoFocus",
            "text": "true"
        },
        "$:/config/SearchResults/Default": {
            "title": "$:/config/SearchResults/Default",
            "text": "$:/core/ui/DefaultSearchResultList"
        },
        "$:/config/ShortcutInfo/bold": {
            "title": "$:/config/ShortcutInfo/bold",
            "text": "{{$:/language/Buttons/Bold/Hint}}"
        },
        "$:/config/ShortcutInfo/cancel-edit-tiddler": {
            "title": "$:/config/ShortcutInfo/cancel-edit-tiddler",
            "text": "{{$:/language/Buttons/Cancel/Hint}}"
        },
        "$:/config/ShortcutInfo/excise": {
            "title": "$:/config/ShortcutInfo/excise",
            "text": "{{$:/language/Buttons/Excise/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-1": {
            "title": "$:/config/ShortcutInfo/heading-1",
            "text": "{{$:/language/Buttons/Heading1/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-2": {
            "title": "$:/config/ShortcutInfo/heading-2",
            "text": "{{$:/language/Buttons/Heading2/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-3": {
            "title": "$:/config/ShortcutInfo/heading-3",
            "text": "{{$:/language/Buttons/Heading3/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-4": {
            "title": "$:/config/ShortcutInfo/heading-4",
            "text": "{{$:/language/Buttons/Heading4/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-5": {
            "title": "$:/config/ShortcutInfo/heading-5",
            "text": "{{$:/language/Buttons/Heading5/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-6": {
            "title": "$:/config/ShortcutInfo/heading-6",
            "text": "{{$:/language/Buttons/Heading6/Hint}}"
        },
        "$:/config/ShortcutInfo/italic": {
            "title": "$:/config/ShortcutInfo/italic",
            "text": "{{$:/language/Buttons/Italic/Hint}}"
        },
        "$:/config/ShortcutInfo/link": {
            "title": "$:/config/ShortcutInfo/link",
            "text": "{{$:/language/Buttons/Link/Hint}}"
        },
        "$:/config/ShortcutInfo/list-bullet": {
            "title": "$:/config/ShortcutInfo/list-bullet",
            "text": "{{$:/language/Buttons/ListBullet/Hint}}"
        },
        "$:/config/ShortcutInfo/list-number": {
            "title": "$:/config/ShortcutInfo/list-number",
            "text": "{{$:/language/Buttons/ListNumber/Hint}}"
        },
        "$:/config/ShortcutInfo/mono-block": {
            "title": "$:/config/ShortcutInfo/mono-block",
            "text": "{{$:/language/Buttons/MonoBlock/Hint}}"
        },
        "$:/config/ShortcutInfo/mono-line": {
            "title": "$:/config/ShortcutInfo/mono-line",
            "text": "{{$:/language/Buttons/MonoLine/Hint}}"
        },
        "$:/config/ShortcutInfo/picture": {
            "title": "$:/config/ShortcutInfo/picture",
            "text": "{{$:/language/Buttons/Picture/Hint}}"
        },
        "$:/config/ShortcutInfo/preview": {
            "title": "$:/config/ShortcutInfo/preview",
            "text": "{{$:/language/Buttons/Preview/Hint}}"
        },
        "$:/config/ShortcutInfo/quote": {
            "title": "$:/config/ShortcutInfo/quote",
            "text": "{{$:/language/Buttons/Quote/Hint}}"
        },
        "$:/config/ShortcutInfo/save-tiddler": {
            "title": "$:/config/ShortcutInfo/save-tiddler",
            "text": "{{$:/language/Buttons/Save/Hint}}"
        },
        "$:/config/ShortcutInfo/stamp": {
            "title": "$:/config/ShortcutInfo/stamp",
            "text": "{{$:/language/Buttons/Stamp/Hint}}"
        },
        "$:/config/ShortcutInfo/strikethrough": {
            "title": "$:/config/ShortcutInfo/strikethrough",
            "text": "{{$:/language/Buttons/Strikethrough/Hint}}"
        },
        "$:/config/ShortcutInfo/subscript": {
            "title": "$:/config/ShortcutInfo/subscript",
            "text": "{{$:/language/Buttons/Subscript/Hint}}"
        },
        "$:/config/ShortcutInfo/superscript": {
            "title": "$:/config/ShortcutInfo/superscript",
            "text": "{{$:/language/Buttons/Superscript/Hint}}"
        },
        "$:/config/ShortcutInfo/underline": {
            "title": "$:/config/ShortcutInfo/underline",
            "text": "{{$:/language/Buttons/Underline/Hint}}"
        },
        "$:/config/shortcuts-mac/bold": {
            "title": "$:/config/shortcuts-mac/bold",
            "text": "meta-B"
        },
        "$:/config/shortcuts-mac/italic": {
            "title": "$:/config/shortcuts-mac/italic",
            "text": "meta-I"
        },
        "$:/config/shortcuts-mac/underline": {
            "title": "$:/config/shortcuts-mac/underline",
            "text": "meta-U"
        },
        "$:/config/shortcuts-not-mac/bold": {
            "title": "$:/config/shortcuts-not-mac/bold",
            "text": "ctrl-B"
        },
        "$:/config/shortcuts-not-mac/italic": {
            "title": "$:/config/shortcuts-not-mac/italic",
            "text": "ctrl-I"
        },
        "$:/config/shortcuts-not-mac/underline": {
            "title": "$:/config/shortcuts-not-mac/underline",
            "text": "ctrl-U"
        },
        "$:/config/shortcuts/cancel-edit-tiddler": {
            "title": "$:/config/shortcuts/cancel-edit-tiddler",
            "text": "escape"
        },
        "$:/config/shortcuts/excise": {
            "title": "$:/config/shortcuts/excise",
            "text": "ctrl-E"
        },
        "$:/config/shortcuts/heading-1": {
            "title": "$:/config/shortcuts/heading-1",
            "text": "ctrl-1"
        },
        "$:/config/shortcuts/heading-2": {
            "title": "$:/config/shortcuts/heading-2",
            "text": "ctrl-2"
        },
        "$:/config/shortcuts/heading-3": {
            "title": "$:/config/shortcuts/heading-3",
            "text": "ctrl-3"
        },
        "$:/config/shortcuts/heading-4": {
            "title": "$:/config/shortcuts/heading-4",
            "text": "ctrl-4"
        },
        "$:/config/shortcuts/heading-5": {
            "title": "$:/config/shortcuts/heading-5",
            "text": "ctrl-5"
        },
        "$:/config/shortcuts/heading-6": {
            "title": "$:/config/shortcuts/heading-6",
            "text": "ctrl-6"
        },
        "$:/config/shortcuts/link": {
            "title": "$:/config/shortcuts/link",
            "text": "ctrl-L"
        },
        "$:/config/shortcuts/list-bullet": {
            "title": "$:/config/shortcuts/list-bullet",
            "text": "ctrl-shift-L"
        },
        "$:/config/shortcuts/list-number": {
            "title": "$:/config/shortcuts/list-number",
            "text": "ctrl-shift-N"
        },
        "$:/config/shortcuts/mono-block": {
            "title": "$:/config/shortcuts/mono-block",
            "text": "ctrl-shift-M"
        },
        "$:/config/shortcuts/mono-line": {
            "title": "$:/config/shortcuts/mono-line",
            "text": "ctrl-M"
        },
        "$:/config/shortcuts/picture": {
            "title": "$:/config/shortcuts/picture",
            "text": "ctrl-shift-I"
        },
        "$:/config/shortcuts/preview": {
            "title": "$:/config/shortcuts/preview",
            "text": "alt-P"
        },
        "$:/config/shortcuts/quote": {
            "title": "$:/config/shortcuts/quote",
            "text": "ctrl-Q"
        },
        "$:/config/shortcuts/save-tiddler": {
            "title": "$:/config/shortcuts/save-tiddler",
            "text": "ctrl+enter"
        },
        "$:/config/shortcuts/stamp": {
            "title": "$:/config/shortcuts/stamp",
            "text": "ctrl-S"
        },
        "$:/config/shortcuts/strikethrough": {
            "title": "$:/config/shortcuts/strikethrough",
            "text": "ctrl-T"
        },
        "$:/config/shortcuts/subscript": {
            "title": "$:/config/shortcuts/subscript",
            "text": "ctrl-shift-B"
        },
        "$:/config/shortcuts/superscript": {
            "title": "$:/config/shortcuts/superscript",
            "text": "ctrl-shift-P"
        },
        "$:/config/SyncFilter": {
            "title": "$:/config/SyncFilter",
            "text": "[is[tiddler]] -[[$:/HistoryList]] -[[$:/Import]] -[[$:/isEncrypted]] -[prefix[$:/status/]] -[prefix[$:/state/]] -[prefix[$:/temp/]]"
        },
        "$:/config/TextEditor/EditorHeight/Height": {
            "title": "$:/config/TextEditor/EditorHeight/Height",
            "text": "400px"
        },
        "$:/config/TextEditor/EditorHeight/Mode": {
            "title": "$:/config/TextEditor/EditorHeight/Mode",
            "text": "auto"
        },
        "$:/config/TiddlerInfo/Default": {
            "title": "$:/config/TiddlerInfo/Default",
            "text": "$:/core/ui/TiddlerInfo/Fields"
        },
        "$:/config/Tiddlers/TitleLinks": {
            "title": "$:/config/Tiddlers/TitleLinks",
            "text": "no"
        },
        "$:/config/Toolbar/ButtonClass": {
            "title": "$:/config/Toolbar/ButtonClass",
            "text": "tc-btn-invisible"
        },
        "$:/config/Toolbar/Icons": {
            "title": "$:/config/Toolbar/Icons",
            "text": "yes"
        },
        "$:/config/Toolbar/Text": {
            "title": "$:/config/Toolbar/Text",
            "text": "no"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/clone": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/clone",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/close-others": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/close-others",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/export-tiddler": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/export-tiddler",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/info": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/info",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions",
            "text": "show"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-here": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-here",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-journal-here": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-journal-here",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/open-window": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/open-window",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permalink": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permalink",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permaview": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permaview",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/delete": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/delete",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-others": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-others",
            "text": "hide"
        },
        "$:/config/WikiParserRules/Inline/wikilink": {
            "title": "$:/config/WikiParserRules/Inline/wikilink",
            "text": "enable"
        },
        "$:/snippets/currpalettepreview": {
            "title": "$:/snippets/currpalettepreview",
            "text": "\\define swatchStyle()\nbackground-color: $(swatchColour)$;\n\\end\n\\define swatch(colour)\n<$set name=\"swatchColour\" value={{##$colour$}}>\n<div class=\"tc-swatch\" style=<<swatchStyle>>/>\n</$set>\n\\end\n<div class=\"tc-swatches-horiz\">\n<<swatch foreground>>\n<<swatch background>>\n<<swatch muted-foreground>>\n<<swatch primary>>\n<<swatch page-background>>\n<<swatch tab-background>>\n<<swatch tiddler-info-background>>\n</div>\n"
        },
        "$:/DefaultTiddlers": {
            "title": "$:/DefaultTiddlers",
            "text": "GettingStarted\n"
        },
        "$:/snippets/download-wiki-button": {
            "title": "$:/snippets/download-wiki-button",
            "text": "\\define lingo-base() $:/language/ControlPanel/Tools/Download/\n<$button class=\"tc-btn-big-green\">\n<$action-sendmessage $message=\"tm-download-file\" $param=\"$:/core/save/all\" filename=\"index.html\"/>\n<<lingo Full/Caption>> {{$:/core/images/save-button}}\n</$button>"
        },
        "$:/language": {
            "title": "$:/language",
            "text": "$:/languages/en-GB"
        },
        "$:/snippets/languageswitcher": {
            "title": "$:/snippets/languageswitcher",
            "text": "{{$:/language/ControlPanel/Basics/Language/Prompt}} <$select tiddler=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[description]]\">\n<option value=<<currentTiddler>>><$view field=\"description\"><$view field=\"name\"><$view field=\"title\"/></$view></$view></option>\n</$list>\n</$select>"
        },
        "$:/core/macros/colour-picker": {
            "title": "$:/core/macros/colour-picker",
            "tags": "$:/tags/Macro",
            "text": "\\define colour-picker-update-recent()\n<$action-listops\n\t$tiddler=\"$:/config/ColourPicker/Recent\"\n\t$subfilter=\"$(colour-picker-value)$ [list[$:/config/ColourPicker/Recent]remove[$(colour-picker-value)$]] +[limit[8]]\"\n/>\n\\end\n\n\\define colour-picker-inner(actions)\n<$button tag=\"a\" tooltip=\"\"\"$(colour-picker-value)$\"\"\">\n\n$(colour-picker-update-recent)$\n\n$actions$\n\n<div style=\"background-color: $(colour-picker-value)$; width: 100%; height: 100%; border-radius: 50%;\"/>\n\n</$button>\n\\end\n\n\\define colour-picker-recent-inner(actions)\n<$set name=\"colour-picker-value\" value=\"$(recentColour)$\">\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$set>\n\\end\n\n\\define colour-picker-recent(actions)\n{{$:/language/ColourPicker/Recent}} <$list filter=\"[list[$:/config/ColourPicker/Recent]]\" variable=\"recentColour\">\n<$macrocall $name=\"colour-picker-recent-inner\" actions=\"\"\"$actions$\"\"\"/></$list>\n\\end\n\n\\define colour-picker(actions)\n<div class=\"tc-colour-chooser\">\n\n<$macrocall $name=\"colour-picker-recent\" actions=\"\"\"$actions$\"\"\"/>\n\n---\n\n<$list filter=\"LightPink Pink Crimson LavenderBlush PaleVioletRed HotPink DeepPink MediumVioletRed Orchid Thistle Plum Violet Magenta Fuchsia DarkMagenta Purple MediumOrchid DarkViolet DarkOrchid Indigo BlueViolet MediumPurple MediumSlateBlue SlateBlue DarkSlateBlue Lavender GhostWhite Blue MediumBlue MidnightBlue DarkBlue Navy RoyalBlue CornflowerBlue LightSteelBlue LightSlateGrey SlateGrey DodgerBlue AliceBlue SteelBlue LightSkyBlue SkyBlue DeepSkyBlue LightBlue PowderBlue CadetBlue Azure LightCyan PaleTurquoise Cyan Aqua DarkTurquoise DarkSlateGrey DarkCyan Teal MediumTurquoise LightSeaGreen Turquoise Aquamarine MediumAquamarine MediumSpringGreen MintCream SpringGreen MediumSeaGreen SeaGreen Honeydew LightGreen PaleGreen DarkSeaGreen LimeGreen Lime ForestGreen Green DarkGreen Chartreuse LawnGreen GreenYellow DarkOliveGreen YellowGreen OliveDrab Beige LightGoldenrodYellow Ivory LightYellow Yellow Olive DarkKhaki LemonChiffon PaleGoldenrod Khaki Gold Cornsilk Goldenrod DarkGoldenrod FloralWhite OldLace Wheat Moccasin Orange PapayaWhip BlanchedAlmond NavajoWhite AntiqueWhite Tan BurlyWood Bisque DarkOrange Linen Peru PeachPuff SandyBrown Chocolate SaddleBrown Seashell Sienna LightSalmon Coral OrangeRed DarkSalmon Tomato MistyRose Salmon Snow LightCoral RosyBrown IndianRed Red Brown FireBrick DarkRed Maroon White WhiteSmoke Gainsboro LightGrey Silver DarkGrey Grey DimGrey Black\" variable=\"colour-picker-value\">\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$list>\n\n---\n\n<$edit-text tiddler=\"$:/config/ColourPicker/New\" tag=\"input\" default=\"\" placeholder=\"\"/> \n<$edit-text tiddler=\"$:/config/ColourPicker/New\" type=\"color\" tag=\"input\"/>\n<$set name=\"colour-picker-value\" value={{$:/config/ColourPicker/New}}>\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$set>\n\n</div>\n\n\\end\n"
        },
        "$:/core/macros/CSS": {
            "title": "$:/core/macros/CSS",
            "tags": "$:/tags/Macro",
            "text": "\\define colour(name)\n<$transclude tiddler={{$:/palette}} index=\"$name$\"><$transclude tiddler=\"$:/palettes/Vanilla\" index=\"$name$\"/></$transclude>\n\\end\n\n\\define color(name)\n<<colour $name$>>\n\\end\n\n\\define box-shadow(shadow)\n``\n  -webkit-box-shadow: $shadow$;\n     -moz-box-shadow: $shadow$;\n          box-shadow: $shadow$;\n``\n\\end\n\n\\define filter(filter)\n``\n  -webkit-filter: $filter$;\n     -moz-filter: $filter$;\n          filter: $filter$;\n``\n\\end\n\n\\define transition(transition)\n``\n  -webkit-transition: $transition$;\n     -moz-transition: $transition$;\n          transition: $transition$;\n``\n\\end\n\n\\define transform-origin(origin)\n``\n  -webkit-transform-origin: $origin$;\n     -moz-transform-origin: $origin$;\n          transform-origin: $origin$;\n``\n\\end\n\n\\define background-linear-gradient(gradient)\n``\nbackground-image: linear-gradient($gradient$);\nbackground-image: -o-linear-gradient($gradient$);\nbackground-image: -moz-linear-gradient($gradient$);\nbackground-image: -webkit-linear-gradient($gradient$);\nbackground-image: -ms-linear-gradient($gradient$);\n``\n\\end\n\n\\define datauri(title)\n<$macrocall $name=\"makedatauri\" type={{$title$!!type}} text={{$title$}}/>\n\\end\n\n\\define if-sidebar(text)\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"yes\" default=\"yes\">$text$</$reveal>\n\\end\n\n\\define if-no-sidebar(text)\n<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"yes\" default=\"yes\">$text$</$reveal>\n\\end\n"
        },
        "$:/core/macros/export": {
            "title": "$:/core/macros/export",
            "tags": "$:/tags/Macro",
            "text": "\\define exportButtonFilename(baseFilename)\n$baseFilename$$(extension)$\n\\end\n\n\\define exportButton(exportFilter:\"[!is[system]sort[title]]\",lingoBase,baseFilename:\"tiddlers\")\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/export\">> tooltip={{$lingoBase$Hint}} aria-label={{$lingoBase$Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/export-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$lingoBase$Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/export\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Exporter]]\">\n<$set name=\"extension\" value={{!!extension}}>\n<$button class=\"tc-btn-invisible\">\n<$action-sendmessage $message=\"tm-download-file\" $param=<<currentTiddler>> exportFilter=\"\"\"$exportFilter$\"\"\" filename=<<exportButtonFilename \"\"\"$baseFilename$\"\"\">>/>\n<$action-deletetiddler $tiddler=<<qualify \"$:/state/popup/export\">>/>\n<$transclude field=\"description\"/>\n</$button>\n</$set>\n</$list>\n</div>\n</$reveal>\n\\end\n"
        },
        "$:/core/macros/image-picker": {
            "title": "$:/core/macros/image-picker",
            "tags": "$:/tags/Macro",
            "text": "\\define image-picker-inner(actions)\n<$button tag=\"a\" tooltip=\"\"\"$(imageTitle)$\"\"\">\n\n$actions$\n\n<$transclude tiddler=<<imageTitle>>/>\n\n</$button>\n\\end\n\n\\define image-picker(actions,subfilter:\"\")\n<div class=\"tc-image-chooser\">\n\n<$list filter=\"[all[shadows+tiddlers]is[image]$subfilter$!has[draft.of]] -[type[application/pdf]] +[sort[title]]\" variable=\"imageTitle\">\n\n<$macrocall $name=\"image-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n\n</$list>\n\n</div>\n\n\\end\n\n"
        },
        "$:/core/macros/lingo": {
            "title": "$:/core/macros/lingo",
            "tags": "$:/tags/Macro",
            "text": "\\define lingo-base()\n$:/language/\n\\end\n\n\\define lingo(title)\n{{$(lingo-base)$$title$}}\n\\end\n"
        },
        "$:/core/macros/list": {
            "title": "$:/core/macros/list",
            "tags": "$:/tags/Macro",
            "text": "\\define list-links(filter,type:\"ul\",subtype:\"li\",class:\"\")\n<$type$ class=\"$class$\">\n<$list filter=\"$filter$\">\n<$subtype$>\n<$link to={{!!title}}>\n<$transclude field=\"caption\">\n<$view field=\"title\"/>\n</$transclude>\n</$link>\n</$subtype$>\n</$list>\n</$type$>\n\\end\n"
        },
        "$:/core/macros/tabs": {
            "title": "$:/core/macros/tabs",
            "tags": "$:/tags/Macro",
            "text": "\\define tabs(tabsList,default,state:\"$:/state/tab\",class,template)\n<div class=\"tc-tab-set $class$\">\n<div class=\"tc-tab-buttons $class$\">\n<$list filter=\"$tabsList$\" variable=\"currentTab\"><$set name=\"save-currentTiddler\" value=<<currentTiddler>>><$tiddler tiddler=<<currentTab>>><$button set=<<qualify \"$state$\">> setTo=<<currentTab>> default=\"$default$\" selectedClass=\"tc-tab-selected\" tooltip={{!!tooltip}}>\n<$tiddler tiddler=<<save-currentTiddler>>>\n<$set name=\"tv-wikilinks\" value=\"no\">\n<$transclude tiddler=<<currentTab>> field=\"caption\">\n<$macrocall $name=\"currentTab\" $type=\"text/plain\" $output=\"text/plain\"/>\n</$transclude>\n</$set></$tiddler></$button></$tiddler></$set></$list>\n</div>\n<div class=\"tc-tab-divider $class$\"/>\n<div class=\"tc-tab-content $class$\">\n<$list filter=\"$tabsList$\" variable=\"currentTab\">\n\n<$reveal type=\"match\" state=<<qualify \"$state$\">> text=<<currentTab>> default=\"$default$\">\n\n<$transclude tiddler=\"$template$\" mode=\"block\">\n\n<$transclude tiddler=<<currentTab>> mode=\"block\"/>\n\n</$transclude>\n\n</$reveal>\n\n</$list>\n</div>\n</div>\n\\end\n"
        },
        "$:/core/macros/tag": {
            "title": "$:/core/macros/tag",
            "tags": "$:/tags/Macro",
            "text": "\\define tag(tag)\n{{$tag$||$:/core/ui/TagTemplate}}\n\\end\n"
        },
        "$:/core/macros/thumbnails": {
            "title": "$:/core/macros/thumbnails",
            "tags": "$:/tags/Macro",
            "text": "\\define thumbnail(link,icon,color,background-color,image,caption,width:\"280\",height:\"157\")\n<$link to=\"\"\"$link$\"\"\"><div class=\"tc-thumbnail-wrapper\">\n<div class=\"tc-thumbnail-image\" style=\"width:$width$px;height:$height$px;\"><$reveal type=\"nomatch\" text=\"\" default=\"\"\"$image$\"\"\" tag=\"div\" style=\"width:$width$px;height:$height$px;\">\n[img[$image$]]\n</$reveal><$reveal type=\"match\" text=\"\" default=\"\"\"$image$\"\"\" tag=\"div\" class=\"tc-thumbnail-background\" style=\"width:$width$px;height:$height$px;background-color:$background-color$;\"></$reveal></div><div class=\"tc-thumbnail-icon\" style=\"fill:$color$;color:$color$;\">\n$icon$\n</div><div class=\"tc-thumbnail-caption\">\n$caption$\n</div>\n</div></$link>\n\\end\n\n\\define thumbnail-right(link,icon,color,background-color,image,caption,width:\"280\",height:\"157\")\n<div class=\"tc-thumbnail-right-wrapper\"><<thumbnail \"\"\"$link$\"\"\" \"\"\"$icon$\"\"\" \"\"\"$color$\"\"\" \"\"\"$background-color$\"\"\" \"\"\"$image$\"\"\" \"\"\"$caption$\"\"\" \"\"\"$width$\"\"\" \"\"\"$height$\"\"\">></div>\n\\end\n\n\\define list-thumbnails(filter,width:\"280\",height:\"157\")\n<$list filter=\"\"\"$filter$\"\"\"><$macrocall $name=\"thumbnail\" link={{!!link}} icon={{!!icon}} color={{!!color}} background-color={{!!background-color}} image={{!!image}} caption={{!!caption}} width=\"\"\"$width$\"\"\" height=\"\"\"$height$\"\"\"/></$list>\n\\end\n"
        },
        "$:/core/macros/timeline": {
            "created": "20141212105914482",
            "modified": "20141212110330815",
            "tags": "$:/tags/Macro",
            "title": "$:/core/macros/timeline",
            "type": "text/vnd.tiddlywiki",
            "text": "\\define timeline-title()\n<!-- Override this macro with a global macro \n     of the same name if you need to change \n     how titles are displayed on the timeline \n     -->\n<$view field=\"title\"/>\n\\end\n\\define timeline(limit:\"100\",format:\"DDth MMM YYYY\",subfilter:\"\",dateField:\"modified\")\n<div class=\"tc-timeline\">\n<$list filter=\"[!is[system]$subfilter$has[$dateField$]!sort[$dateField$]limit[$limit$]eachday[$dateField$]]\">\n<div class=\"tc-menu-list-item\">\n<$view field=\"$dateField$\" format=\"date\" template=\"$format$\"/>\n<$list filter=\"[sameday:$dateField${!!$dateField$}!is[system]$subfilter$!sort[$dateField$]]\">\n<div class=\"tc-menu-list-subitem\">\n<$link to={{!!title}}>\n<<timeline-title>>\n</$link>\n</div>\n</$list>\n</div>\n</$list>\n</div>\n\\end\n"
        },
        "$:/core/macros/toc": {
            "title": "$:/core/macros/toc",
            "tags": "$:/tags/Macro",
            "text": "\\define toc-caption()\n<$set name=\"tv-wikilinks\" value=\"no\">\n<$transclude field=\"caption\">\n<$view field=\"title\"/>\n</$transclude>\n</$set>\n\\end\n\n\\define toc-body(rootTag,tag,sort:\"\",itemClassFilter)\n<ol class=\"tc-toc\">\n<$list filter=\"\"\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\"\"\">\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$list filter=\"[all[current]toc-link[no]]\" emptyMessage=\"<$link><$view field='caption'><$view field='title'/></$view></$link>\">\n<<toc-caption>>\n</$list>\n<$list filter=\"\"\"[all[current]] -[[$rootTag$]]\"\"\">\n<$macrocall $name=\"toc-body\" rootTag=\"\"\"$rootTag$\"\"\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$list>\n</li>\n</$set>\n</$list>\n</ol>\n\\end\n\n\\define toc(tag,sort:\"\",itemClassFilter)\n<<toc-body rootTag:\"\"\"$tag$\"\"\" tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"itemClassFilter\"\"\">>\n\\end\n\n\\define toc-linked-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$link>\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n<<toc-caption>>\n</$link>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-unlinked-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-expandable-empty-message()\n<<toc-linked-expandable-body tag:\"\"\"$(tag)$\"\"\" sort:\"\"\"$(sort)$\"\"\" itemClassFilter:\"\"\"$(itemClassFilter)$\"\"\">>\n\\end\n\n\\define toc-expandable(tag,sort:\"\",itemClassFilter)\n<$vars tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\">\n<ol class=\"tc-toc toc-expandable\">\n<$list filter=\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\">\n<$list filter=\"[all[current]toc-link[no]]\" 
emptyMessage=<<toc-expandable-empty-message>>>\n<<toc-unlinked-expandable-body tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"itemClassFilter\"\"\">>\n</$list>\n</$list>\n</ol>\n</$vars>\n\\end\n\n\\define toc-linked-selective-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$link>\n<$list filter=\"[all[current]tagging[]limit[1]]\" variable=\"ignore\" emptyMessage=\"<$button class='tc-btn-invisible'>{{$:/core/images/blank}}</$button>\">\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</$list>\n<<toc-caption>>\n</$link>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-selective-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-unlinked-selective-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$list filter=\"[all[current]tagging[]limit[1]]\" variable=\"ignore\" emptyMessage=\"<$button class='tc-btn-invisible'>{{$:/core/images/blank}}</$button> <$view field='caption'><$view field='title'/></$view>\">\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n</$list>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"\"\"toc-selective-expandable\"\"\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-selective-expandable-empty-message()\n<<toc-linked-selective-expandable-body tag:\"\"\"$(tag)$\"\"\" sort:\"\"\"$(sort)$\"\"\" itemClassFilter:\"\"\"$(itemClassFilter)$\"\"\">>\n\\end\n\n\\define toc-selective-expandable(tag,sort:\"\",itemClassFilter)\n<$vars tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\">\n<ol class=\"tc-toc toc-selective-expandable\">\n<$list filter=\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\">\n<$list filter=\"[all[current]toc-link[no]]\" variable=\"ignore\" emptyMessage=<<toc-selective-expandable-empty-message>>>\n<<toc-unlinked-selective-expandable-body tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"$itemClassFilter$\"\"\">>\n</$list>\n</$list>\n</ol>\n</$vars>\n\\end\n\n\\define toc-tabbed-selected-item-filter(selectedTiddler)\n[all[current]field:title{$selectedTiddler$}]\n\\end\n\n\\define 
toc-tabbed-external-nav(tag,sort:\"\",selectedTiddler:\"$:/temp/toc/selectedTiddler\",unselectedText,missingText,template:\"\")\n<$tiddler tiddler={{$selectedTiddler$}}>\n<div class=\"tc-tabbed-table-of-contents\">\n<$linkcatcher to=\"$selectedTiddler$\">\n<div class=\"tc-table-of-contents\">\n<$macrocall $name=\"toc-selective-expandable\" tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=<<toc-tabbed-selected-item-filter selectedTiddler:\"\"\"$selectedTiddler$\"\"\">>/>\n</div>\n</$linkcatcher>\n<div class=\"tc-tabbed-table-of-contents-content\">\n<$reveal state=\"\"\"$selectedTiddler$\"\"\" type=\"nomatch\" text=\"\">\n<$transclude mode=\"block\" tiddler=\"$template$\">\n<h1><<toc-caption>></h1>\n<$transclude mode=\"block\">$missingText$</$transclude>\n</$transclude>\n</$reveal>\n<$reveal state=\"\"\"$selectedTiddler$\"\"\" type=\"match\" text=\"\">\n$unselectedText$\n</$reveal>\n</div>\n</div>\n</$tiddler>\n\\end\n\n\\define toc-tabbed-internal-nav(tag,sort:\"\",selectedTiddler:\"$:/temp/toc/selectedTiddler\",unselectedText,missingText,template:\"\")\n<$linkcatcher to=\"\"\"$selectedTiddler$\"\"\">\n<$macrocall $name=\"toc-tabbed-external-nav\" tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" selectedTiddler=\"\"\"$selectedTiddler$\"\"\" unselectedText=\"\"\"$unselectedText$\"\"\" missingText=\"\"\"$missingText$\"\"\" template=\"\"\"$template$\"\"\"/>\n</$linkcatcher>\n\\end\n\n"
        },
        "$:/core/macros/translink": {
            "title": "$:/core/macros/translink",
            "tags": "$:/tags/Macro",
            "text": "\\define translink(title,mode:\"block\")\n<div style=\"border:1px solid #ccc; padding: 0.5em; background: black; foreground; white;\">\n<$link to=\"\"\"$title$\"\"\">\n<$text text=\"\"\"$title$\"\"\"/>\n</$link>\n<div style=\"border:1px solid #ccc; padding: 0.5em; background: white; foreground; black;\">\n<$transclude tiddler=\"\"\"$title$\"\"\" mode=\"$mode$\">\n\"<$text text=\"\"\"$title$\"\"\"/>\" is missing\n</$transclude>\n</div>\n</div>\n\\end\n"
        },
        "$:/snippets/minilanguageswitcher": {
            "title": "$:/snippets/minilanguageswitcher",
            "text": "<$select tiddler=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[title]]\">\n<option value=<<currentTiddler>>><$view field=\"description\"><$view field=\"name\"><$view field=\"title\"/></$view></$view></option>\n</$list>\n</$select>"
        },
        "$:/snippets/minithemeswitcher": {
            "title": "$:/snippets/minithemeswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Theme/\n<<lingo Prompt>> <$select tiddler=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\">\n<option value=<<currentTiddler>>><$view field=\"name\"><$view field=\"title\"/></$view></option>\n</$list>\n</$select>"
        },
        "$:/snippets/modules": {
            "title": "$:/snippets/modules",
            "text": "\\define describeModuleType(type)\n{{$:/language/Docs/ModuleTypes/$type$}}\n\\end\n<$list filter=\"[moduletypes[]]\">\n\n!! <$macrocall $name=\"currentTiddler\" $type=\"text/plain\" $output=\"text/plain\"/>\n\n<$macrocall $name=\"describeModuleType\" type=<<currentTiddler>>/>\n\n<ul><$list filter=\"[all[current]modules[]]\"><li><$link><<currentTiddler>></$link>\n</li>\n</$list>\n</ul>\n</$list>\n"
        },
        "$:/palette": {
            "title": "$:/palette",
            "text": "$:/palettes/Vanilla"
        },
        "$:/snippets/paletteeditor": {
            "title": "$:/snippets/paletteeditor",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/Editor/\n\\define describePaletteColour(colour)\n<$transclude tiddler=\"$:/language/Docs/PaletteColours/$colour$\"><$text text=\"$colour$\"/></$transclude>\n\\end\n<$set name=\"currentTiddler\" value={{$:/palette}}>\n\n<<lingo Prompt>> <$link to={{$:/palette}}><$macrocall $name=\"currentTiddler\" $output=\"text/plain\"/></$link>\n\n<$list filter=\"[all[current]is[shadow]is[tiddler]]\" variable=\"listItem\">\n<<lingo Prompt/Modified>>\n<$button message=\"tm-delete-tiddler\" param={{$:/palette}}><<lingo Reset/Caption>></$button>\n</$list>\n\n<$list filter=\"[all[current]is[shadow]!is[tiddler]]\" variable=\"listItem\">\n<<lingo Clone/Prompt>>\n</$list>\n\n<$button message=\"tm-new-tiddler\" param={{$:/palette}}><<lingo Clone/Caption>></$button>\n\n<table>\n<tbody>\n<$list filter=\"[all[current]indexes[]]\" variable=\"colourName\">\n<tr>\n<td>\n''<$macrocall $name=\"describePaletteColour\" colour=<<colourName>>/>''<br/>\n<$macrocall $name=\"colourName\" $output=\"text/plain\"/>\n</td>\n<td>\n<$edit-text index=<<colourName>> tag=\"input\"/>\n<br>\n<$edit-text index=<<colourName>> type=\"color\" tag=\"input\"/>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n</$set>\n"
        },
        "$:/snippets/palettepreview": {
            "title": "$:/snippets/palettepreview",
            "text": "<$set name=\"currentTiddler\" value={{$:/palette}}>\n<$transclude tiddler=\"$:/snippets/currpalettepreview\"/>\n</$set>\n"
        },
        "$:/snippets/paletteswitcher": {
            "title": "$:/snippets/paletteswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/\n<div class=\"tc-prompt\">\n<<lingo Prompt>> <$view tiddler={{$:/palette}} field=\"name\"/>\n</div>\n\n<$linkcatcher to=\"$:/palette\">\n<div class=\"tc-chooser\"><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Palette]sort[description]]\"><div class=\"tc-chooser-item\"><$link to={{!!title}}><div><$reveal state=\"$:/palette\" type=\"match\" text={{!!title}}>&bull;</$reveal><$reveal state=\"$:/palette\" type=\"nomatch\" text={{!!title}}>&nbsp;</$reveal> ''<$view field=\"name\" format=\"text\"/>'' - <$view field=\"description\" format=\"text\"/></div><$transclude tiddler=\"$:/snippets/currpalettepreview\"/></$link></div>\n</$list>\n</div>\n</$linkcatcher>"
        },
        "$:/temp/search": {
            "title": "$:/temp/search",
            "text": ""
        },
        "$:/tags/AdvancedSearch": {
            "title": "$:/tags/AdvancedSearch",
            "list": "[[$:/core/ui/AdvancedSearch/Standard]] [[$:/core/ui/AdvancedSearch/System]] [[$:/core/ui/AdvancedSearch/Shadows]] [[$:/core/ui/AdvancedSearch/Filter]]"
        },
        "$:/tags/AdvancedSearch/FilterButton": {
            "title": "$:/tags/AdvancedSearch/FilterButton",
            "list": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown $:/core/ui/AdvancedSearch/Filter/FilterButtons/clear $:/core/ui/AdvancedSearch/Filter/FilterButtons/export $:/core/ui/AdvancedSearch/Filter/FilterButtons/delete"
        },
        "$:/tags/ControlPanel": {
            "title": "$:/tags/ControlPanel",
            "list": "$:/core/ui/ControlPanel/Info $:/core/ui/ControlPanel/Appearance $:/core/ui/ControlPanel/Settings $:/core/ui/ControlPanel/Saving $:/core/ui/ControlPanel/Plugins $:/core/ui/ControlPanel/Tools $:/core/ui/ControlPanel/Internals"
        },
        "$:/tags/ControlPanel/Info": {
            "title": "$:/tags/ControlPanel/Info",
            "list": "$:/core/ui/ControlPanel/Basics $:/core/ui/ControlPanel/Advanced"
        },
        "$:/tags/ControlPanel/Plugins": {
            "title": "$:/tags/ControlPanel/Plugins",
            "list": "[[$:/core/ui/ControlPanel/Plugins/Installed]] [[$:/core/ui/ControlPanel/Plugins/Add]]"
        },
        "$:/tags/EditorToolbar": {
            "title": "$:/tags/EditorToolbar",
            "list": "$:/core/ui/EditorToolbar/paint $:/core/ui/EditorToolbar/opacity $:/core/ui/EditorToolbar/line-width $:/core/ui/EditorToolbar/clear $:/core/ui/EditorToolbar/bold $:/core/ui/EditorToolbar/italic $:/core/ui/EditorToolbar/strikethrough $:/core/ui/EditorToolbar/underline $:/core/ui/EditorToolbar/superscript $:/core/ui/EditorToolbar/subscript $:/core/ui/EditorToolbar/mono-line $:/core/ui/EditorToolbar/mono-block $:/core/ui/EditorToolbar/quote $:/core/ui/EditorToolbar/list-bullet $:/core/ui/EditorToolbar/list-number $:/core/ui/EditorToolbar/heading-1 $:/core/ui/EditorToolbar/heading-2 $:/core/ui/EditorToolbar/heading-3 $:/core/ui/EditorToolbar/heading-4 $:/core/ui/EditorToolbar/heading-5 $:/core/ui/EditorToolbar/heading-6 $:/core/ui/EditorToolbar/link $:/core/ui/EditorToolbar/excise $:/core/ui/EditorToolbar/picture $:/core/ui/EditorToolbar/stamp $:/core/ui/EditorToolbar/size $:/core/ui/EditorToolbar/editor-height $:/core/ui/EditorToolbar/more $:/core/ui/EditorToolbar/preview $:/core/ui/EditorToolbar/preview-type"
        },
        "$:/tags/EditTemplate": {
            "title": "$:/tags/EditTemplate",
            "list": "[[$:/core/ui/EditTemplate/controls]] [[$:/core/ui/EditTemplate/title]] [[$:/core/ui/EditTemplate/tags]] [[$:/core/ui/EditTemplate/shadow]] [[$:/core/ui/ViewTemplate/classic]] [[$:/core/ui/EditTemplate/body]] [[$:/core/ui/EditTemplate/type]] [[$:/core/ui/EditTemplate/fields]]"
        },
        "$:/tags/EditToolbar": {
            "title": "$:/tags/EditToolbar",
            "list": "[[$:/core/ui/Buttons/delete]] [[$:/core/ui/Buttons/cancel]] [[$:/core/ui/Buttons/save]]"
        },
        "$:/tags/MoreSideBar": {
            "title": "$:/tags/MoreSideBar",
            "list": "[[$:/core/ui/MoreSideBar/All]] [[$:/core/ui/MoreSideBar/Recent]] [[$:/core/ui/MoreSideBar/Tags]] [[$:/core/ui/MoreSideBar/Missing]] [[$:/core/ui/MoreSideBar/Drafts]] [[$:/core/ui/MoreSideBar/Orphans]] [[$:/core/ui/MoreSideBar/Types]] [[$:/core/ui/MoreSideBar/System]] [[$:/core/ui/MoreSideBar/Shadows]]",
            "text": ""
        },
        "$:/tags/PageControls": {
            "title": "$:/tags/PageControls",
            "list": "[[$:/core/ui/Buttons/home]] [[$:/core/ui/Buttons/close-all]] [[$:/core/ui/Buttons/fold-all]] [[$:/core/ui/Buttons/unfold-all]] [[$:/core/ui/Buttons/permaview]] [[$:/core/ui/Buttons/new-tiddler]] [[$:/core/ui/Buttons/new-journal]] [[$:/core/ui/Buttons/new-image]] [[$:/core/ui/Buttons/import]] [[$:/core/ui/Buttons/export-page]] [[$:/core/ui/Buttons/control-panel]] [[$:/core/ui/Buttons/advanced-search]] [[$:/core/ui/Buttons/tag-manager]] [[$:/core/ui/Buttons/language]] [[$:/core/ui/Buttons/palette]] [[$:/core/ui/Buttons/theme]] [[$:/core/ui/Buttons/storyview]] [[$:/core/ui/Buttons/encryption]] [[$:/core/ui/Buttons/full-screen]] [[$:/core/ui/Buttons/save-wiki]] [[$:/core/ui/Buttons/refresh]] [[$:/core/ui/Buttons/more-page-actions]]"
        },
        "$:/tags/PageTemplate": {
            "title": "$:/tags/PageTemplate",
            "list": "[[$:/core/ui/PageTemplate/topleftbar]] [[$:/core/ui/PageTemplate/toprightbar]] [[$:/core/ui/PageTemplate/sidebar]] [[$:/core/ui/PageTemplate/story]] [[$:/core/ui/PageTemplate/alerts]]",
            "text": ""
        },
        "$:/tags/SideBar": {
            "title": "$:/tags/SideBar",
            "list": "[[$:/core/ui/SideBar/Open]] [[$:/core/ui/SideBar/Recent]] [[$:/core/ui/SideBar/Tools]] [[$:/core/ui/SideBar/More]]",
            "text": ""
        },
        "$:/tags/TiddlerInfo": {
            "title": "$:/tags/TiddlerInfo",
            "list": "[[$:/core/ui/TiddlerInfo/Tools]] [[$:/core/ui/TiddlerInfo/References]] [[$:/core/ui/TiddlerInfo/Tagging]] [[$:/core/ui/TiddlerInfo/List]] [[$:/core/ui/TiddlerInfo/Listed]] [[$:/core/ui/TiddlerInfo/Fields]]",
            "text": ""
        },
        "$:/tags/TiddlerInfo/Advanced": {
            "title": "$:/tags/TiddlerInfo/Advanced",
            "list": "[[$:/core/ui/TiddlerInfo/Advanced/ShadowInfo]] [[$:/core/ui/TiddlerInfo/Advanced/PluginInfo]]"
        },
        "$:/tags/ViewTemplate": {
            "title": "$:/tags/ViewTemplate",
            "list": "[[$:/core/ui/ViewTemplate/title]] [[$:/core/ui/ViewTemplate/unfold]] [[$:/core/ui/ViewTemplate/subtitle]] [[$:/core/ui/ViewTemplate/tags]] [[$:/core/ui/ViewTemplate/classic]] [[$:/core/ui/ViewTemplate/body]]"
        },
        "$:/tags/ViewToolbar": {
            "title": "$:/tags/ViewToolbar",
            "list": "[[$:/core/ui/Buttons/more-tiddler-actions]] [[$:/core/ui/Buttons/info]] [[$:/core/ui/Buttons/new-here]] [[$:/core/ui/Buttons/new-journal-here]] [[$:/core/ui/Buttons/clone]] [[$:/core/ui/Buttons/export-tiddler]] [[$:/core/ui/Buttons/edit]] [[$:/core/ui/Buttons/delete]] [[$:/core/ui/Buttons/permalink]] [[$:/core/ui/Buttons/permaview]] [[$:/core/ui/Buttons/open-window]] [[$:/core/ui/Buttons/close-others]] [[$:/core/ui/Buttons/close]] [[$:/core/ui/Buttons/fold-others]] [[$:/core/ui/Buttons/fold]]"
        },
        "$:/snippets/themeswitcher": {
            "title": "$:/snippets/themeswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Theme/\n<<lingo Prompt>> <$view tiddler={{$:/theme}} field=\"name\"/>\n\n<$linkcatcher to=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\"><div><$reveal state=\"$:/theme\" type=\"match\" text={{!!title}}>&bull;</$reveal><$reveal state=\"$:/theme\" type=\"nomatch\" text={{!!title}}>&nbsp;</$reveal> <$link to={{!!title}}>''<$view field=\"name\" format=\"text\"/>'' <$view field=\"description\" format=\"text\"/></$link></div>\n</$list>\n</$linkcatcher>"
        },
        "$:/core/wiki/title": {
            "title": "$:/core/wiki/title",
            "type": "text/vnd.tiddlywiki",
            "text": "{{$:/SiteTitle}} --- {{$:/SiteSubtitle}}"
        },
        "$:/view": {
            "title": "$:/view",
            "text": "classic"
        },
        "$:/snippets/viewswitcher": {
            "title": "$:/snippets/viewswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/StoryView/\n<<lingo Prompt>> <$select tiddler=\"$:/view\">\n<$list filter=\"[storyviews[]]\">\n<option><$view field=\"title\"/></option>\n</$list>\n</$select>"
        }
    }
}
<div class="tc-more-sidebar">
<<tabs "[all[shadows+tiddlers]tag[$:/tags/MoreSideBar]!has[draft.of]]" "$:/core/ui/MoreSideBar/Tags" "$:/state/tab/moresidebar" "tc-vertical">>
</div>
<$macrocall $name="timeline" format={{$:/language/RecentChanges/DateFormat}}/>
\define lingo-base() $:/language/ControlPanel/
\define config-title()
$:/config/PageControlButtons/Visibility/$(listItem)$
\end

<<lingo Basics/Version/Prompt>> <<version>>

<$set name="tv-config-toolbar-icons" value="yes">

<$set name="tv-config-toolbar-text" value="yes">

<$set name="tv-config-toolbar-class" value="">

<$list filter="[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]" variable="listItem">

<div style="position:relative;">

<$checkbox tiddler=<<config-title>> field="text" checked="show" unchecked="hide" default="show"/> <$transclude tiddler=<<listItem>>/> <i class="tc-muted"><$transclude tiddler=<<listItem>> field="description"/></i>

</div>

</$list>

</$set>

</$set>

</$set>


\define title-styles()
fill:$(foregroundColor)$;
\end
\define config-title()
$:/config/ViewToolbarButtons/Visibility/$(listItem)$
\end
<div class="tc-tiddler-title">
<div class="tc-titlebar">
<span class="tc-tiddler-controls">
<$list filter="[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]" variable="listItem"><$reveal type="nomatch" state=<<config-title>> text="hide"><$transclude tiddler=<<listItem>>/></$reveal></$list>
</span>
<$set name="tv-wikilinks" value={{$:/config/Tiddlers/TitleLinks}}>
<$link>
<$set name="foregroundColor" value={{!!color}}>
<span class="tc-tiddler-title-icon" style=<<title-styles>>>
<$transclude tiddler={{!!icon}}/>
</span>
</$set>
<$list filter="[all[current]removeprefix[$:/]]">
<h2 class="tc-title" title={{$:/language/SystemTiddler/Tooltip}}>
<span class="tc-system-title-prefix">$:/</span><$text text=<<currentTiddler>>/>
</h2>
</$list>
<$list filter="[all[current]!prefix[$:/]]">
<h2 class="tc-title">
<$transclude field="caption"><$view field="title"/></$transclude>
</h2>
</$list>
</$link>
</$set>
</div>

<$reveal type="nomatch" text="" default="" state=<<tiddlerInfoState>> class="tc-tiddler-info tc-popup-handle" animate="yes" retain="yes">

<$transclude tiddler="$:/core/ui/TiddlerInfo"/>

</$reveal>
</div>
[[Welcome Page]]
no
{
    "tiddlers": {
        "$:/plugins/tiddlywiki/highlight/highlight.js": {
            "type": "application/javascript",
            "title": "$:/plugins/tiddlywiki/highlight/highlight.js",
            "module-type": "library",
            "text": "var hljs = require(\"$:/plugins/tiddlywiki/highlight/highlight.js\");\n!function(e){\"undefined\"!=typeof exports?e(exports):(window.hljs=e({}),\"function\"==typeof define&&define.amd&&define(\"hljs\",[],function(){return window.hljs}))}(function(e){function n(e){return e.replace(/&/gm,\"&amp;\").replace(/</gm,\"&lt;\").replace(/>/gm,\"&gt;\")}function t(e){return e.nodeName.toLowerCase()}function r(e,n){var t=e&&e.exec(n);return t&&0==t.index}function a(e){return/^(no-?highlight|plain|text)$/i.test(e)}function i(e){var n,t,r,i=e.className+\" \";if(i+=e.parentNode?e.parentNode.className:\"\",t=/\\blang(?:uage)?-([\\w-]+)\\b/i.exec(i))return w(t[1])?t[1]:\"no-highlight\";for(i=i.split(/\\s+/),n=0,r=i.length;r>n;n++)if(w(i[n])||a(i[n]))return i[n]}function o(e,n){var t,r={};for(t in e)r[t]=e[t];if(n)for(t in n)r[t]=n[t];return r}function u(e){var n=[];return function r(e,a){for(var i=e.firstChild;i;i=i.nextSibling)3==i.nodeType?a+=i.nodeValue.length:1==i.nodeType&&(n.push({event:\"start\",offset:a,node:i}),a=r(i,a),t(i).match(/br|hr|img|input/)||n.push({event:\"stop\",offset:a,node:i}));return a}(e,0),n}function c(e,r,a){function i(){return e.length&&r.length?e[0].offset!=r[0].offset?e[0].offset<r[0].offset?e:r:\"start\"==r[0].event?e:r:e.length?e:r}function o(e){function r(e){return\" \"+e.nodeName+'=\"'+n(e.value)+'\"'}f+=\"<\"+t(e)+Array.prototype.map.call(e.attributes,r).join(\"\")+\">\"}function u(e){f+=\"</\"+t(e)+\">\"}function c(e){(\"start\"==e.event?o:u)(e.node)}for(var s=0,f=\"\",l=[];e.length||r.length;){var g=i();if(f+=n(a.substr(s,g[0].offset-s)),s=g[0].offset,g==e){l.reverse().forEach(u);do c(g.splice(0,1)[0]),g=i();while(g==e&&g.length&&g[0].offset==s);l.reverse().forEach(o)}else\"start\"==g[0].event?l.push(g[0].node):l.pop(),c(g.splice(0,1)[0])}return f+n(a.substr(s))}function s(e){function n(e){return e&&e.source||e}function t(t,r){return new RegExp(n(t),\"m\"+(e.cI?\"i\":\"\")+(r?\"g\":\"\"))}function r(a,i){if(!a.compiled){if(a.compiled=!0,a.k=a.k||a.bK,a.k){var u={},c=function(n,t){e.cI&&(t=t.toLowerCase()),t.split(\" \").forEach(function(e){var t=e.split(\"|\");u[t[0]]=[n,t[1]?Number(t[1]):1]})};\"string\"==typeof a.k?c(\"keyword\",a.k):Object.keys(a.k).forEach(function(e){c(e,a.k[e])}),a.k=u}a.lR=t(a.l||/\\b\\w+\\b/,!0),i&&(a.bK&&(a.b=\"\\\\b(\"+a.bK.split(\" \").join(\"|\")+\")\\\\b\"),a.b||(a.b=/\\B|\\b/),a.bR=t(a.b),a.e||a.eW||(a.e=/\\B|\\b/),a.e&&(a.eR=t(a.e)),a.tE=n(a.e)||\"\",a.eW&&i.tE&&(a.tE+=(a.e?\"|\":\"\")+i.tE)),a.i&&(a.iR=t(a.i)),void 0===a.r&&(a.r=1),a.c||(a.c=[]);var s=[];a.c.forEach(function(e){e.v?e.v.forEach(function(n){s.push(o(e,n))}):s.push(\"self\"==e?a:e)}),a.c=s,a.c.forEach(function(e){r(e,a)}),a.starts&&r(a.starts,i);var f=a.c.map(function(e){return e.bK?\"\\\\.?(\"+e.b+\")\\\\.?\":e.b}).concat([a.tE,a.i]).map(n).filter(Boolean);a.t=f.length?t(f.join(\"|\"),!0):{exec:function(){return null}}}}r(e)}function f(e,t,a,i){function o(e,n){for(var t=0;t<n.c.length;t++)if(r(n.c[t].bR,e))return n.c[t]}function u(e,n){if(r(e.eR,n)){for(;e.endsParent&&e.parent;)e=e.parent;return e}return e.eW?u(e.parent,n):void 0}function c(e,n){return!a&&r(n.iR,e)}function g(e,n){var t=N.cI?n[0].toLowerCase():n[0];return e.k.hasOwnProperty(t)&&e.k[t]}function h(e,n,t,r){var a=r?\"\":E.classPrefix,i='<span class=\"'+a,o=t?\"\":\"</span>\";return i+=e+'\">',i+n+o}function p(){if(!L.k)return n(y);var e=\"\",t=0;L.lR.lastIndex=0;for(var r=L.lR.exec(y);r;){e+=n(y.substr(t,r.index-t));var 
a=g(L,r);a?(B+=a[1],e+=h(a[0],n(r[0]))):e+=n(r[0]),t=L.lR.lastIndex,r=L.lR.exec(y)}return e+n(y.substr(t))}function d(){var e=\"string\"==typeof L.sL;if(e&&!x[L.sL])return n(y);var t=e?f(L.sL,y,!0,M[L.sL]):l(y,L.sL.length?L.sL:void 0);return L.r>0&&(B+=t.r),e&&(M[L.sL]=t.top),h(t.language,t.value,!1,!0)}function b(){return void 0!==L.sL?d():p()}function v(e,t){var r=e.cN?h(e.cN,\"\",!0):\"\";e.rB?(k+=r,y=\"\"):e.eB?(k+=n(t)+r,y=\"\"):(k+=r,y=t),L=Object.create(e,{parent:{value:L}})}function m(e,t){if(y+=e,void 0===t)return k+=b(),0;var r=o(t,L);if(r)return k+=b(),v(r,t),r.rB?0:t.length;var a=u(L,t);if(a){var i=L;i.rE||i.eE||(y+=t),k+=b();do L.cN&&(k+=\"</span>\"),B+=L.r,L=L.parent;while(L!=a.parent);return i.eE&&(k+=n(t)),y=\"\",a.starts&&v(a.starts,\"\"),i.rE?0:t.length}if(c(t,L))throw new Error('Illegal lexeme \"'+t+'\" for mode \"'+(L.cN||\"<unnamed>\")+'\"');return y+=t,t.length||1}var N=w(e);if(!N)throw new Error('Unknown language: \"'+e+'\"');s(N);var R,L=i||N,M={},k=\"\";for(R=L;R!=N;R=R.parent)R.cN&&(k=h(R.cN,\"\",!0)+k);var y=\"\",B=0;try{for(var C,j,I=0;;){if(L.t.lastIndex=I,C=L.t.exec(t),!C)break;j=m(t.substr(I,C.index-I),C[0]),I=C.index+j}for(m(t.substr(I)),R=L;R.parent;R=R.parent)R.cN&&(k+=\"</span>\");return{r:B,value:k,language:e,top:L}}catch(O){if(-1!=O.message.indexOf(\"Illegal\"))return{r:0,value:n(t)};throw O}}function l(e,t){t=t||E.languages||Object.keys(x);var r={r:0,value:n(e)},a=r;return t.forEach(function(n){if(w(n)){var t=f(n,e,!1);t.language=n,t.r>a.r&&(a=t),t.r>r.r&&(a=r,r=t)}}),a.language&&(r.second_best=a),r}function g(e){return E.tabReplace&&(e=e.replace(/^((<[^>]+>|\\t)+)/gm,function(e,n){return n.replace(/\\t/g,E.tabReplace)})),E.useBR&&(e=e.replace(/\\n/g,\"<br>\")),e}function h(e,n,t){var r=n?R[n]:t,a=[e.trim()];return e.match(/\\bhljs\\b/)||a.push(\"hljs\"),-1===e.indexOf(r)&&a.push(r),a.join(\" \").trim()}function p(e){var n=i(e);if(!a(n)){var t;E.useBR?(t=document.createElementNS(\"http://www.w3.org/1999/xhtml\",\"div\"),t.innerHTML=e.innerHTML.replace(/\\n/g,\"\").replace(/<br[ \\/]*>/g,\"\\n\")):t=e;var r=t.textContent,o=n?f(n,r,!0):l(r),s=u(t);if(s.length){var p=document.createElementNS(\"http://www.w3.org/1999/xhtml\",\"div\");p.innerHTML=o.value,o.value=c(s,u(p),r)}o.value=g(o.value),e.innerHTML=o.value,e.className=h(e.className,n,o.language),e.result={language:o.language,re:o.r},o.second_best&&(e.second_best={language:o.second_best.language,re:o.second_best.r})}}function d(e){E=o(E,e)}function b(){if(!b.called){b.called=!0;var e=document.querySelectorAll(\"pre code\");Array.prototype.forEach.call(e,p)}}function v(){addEventListener(\"DOMContentLoaded\",b,!1),addEventListener(\"load\",b,!1)}function m(n,t){var r=x[n]=t(e);r.aliases&&r.aliases.forEach(function(e){R[e]=n})}function N(){return Object.keys(x)}function w(e){return e=e.toLowerCase(),x[e]||x[R[e]]}var E={classPrefix:\"hljs-\",tabReplace:null,useBR:!1,languages:void 0},x={},R={};return 
e.highlight=f,e.highlightAuto=l,e.fixMarkup=g,e.highlightBlock=p,e.configure=d,e.initHighlighting=b,e.initHighlightingOnLoad=v,e.registerLanguage=m,e.listLanguages=N,e.getLanguage=w,e.inherit=o,e.IR=\"[a-zA-Z]\\\\w*\",e.UIR=\"[a-zA-Z_]\\\\w*\",e.NR=\"\\\\b\\\\d+(\\\\.\\\\d+)?\",e.CNR=\"(\\\\b0[xX][a-fA-F0-9]+|(\\\\b\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+)([eE][-+]?\\\\d+)?)\",e.BNR=\"\\\\b(0b[01]+)\",e.RSR=\"!|!=|!==|%|%=|&|&&|&=|\\\\*|\\\\*=|\\\\+|\\\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\\\?|\\\\[|\\\\{|\\\\(|\\\\^|\\\\^=|\\\\||\\\\|=|\\\\|\\\\||~\",e.BE={b:\"\\\\\\\\[\\\\s\\\\S]\",r:0},e.ASM={cN:\"string\",b:\"'\",e:\"'\",i:\"\\\\n\",c:[e.BE]},e.QSM={cN:\"string\",b:'\"',e:'\"',i:\"\\\\n\",c:[e.BE]},e.PWM={b:/\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such)\\b/},e.C=function(n,t,r){var a=e.inherit({cN:\"comment\",b:n,e:t,c:[]},r||{});return a.c.push(e.PWM),a.c.push({cN:\"doctag\",b:\"(?:TODO|FIXME|NOTE|BUG|XXX):\",r:0}),a},e.CLCM=e.C(\"//\",\"$\"),e.CBCM=e.C(\"/\\\\*\",\"\\\\*/\"),e.HCM=e.C(\"#\",\"$\"),e.NM={cN:\"number\",b:e.NR,r:0},e.CNM={cN:\"number\",b:e.CNR,r:0},e.BNM={cN:\"number\",b:e.BNR,r:0},e.CSSNM={cN:\"number\",b:e.NR+\"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?\",r:0},e.RM={cN:\"regexp\",b:/\\//,e:/\\/[gimuy]*/,i:/\\n/,c:[e.BE,{b:/\\[/,e:/\\]/,r:0,c:[e.BE]}]},e.TM={cN:\"title\",b:e.IR,r:0},e.UTM={cN:\"title\",b:e.UIR,r:0},e});hljs.registerLanguage(\"markdown\",function(e){return{aliases:[\"md\",\"mkdown\",\"mkd\"],c:[{cN:\"header\",v:[{b:\"^#{1,6}\",e:\"$\"},{b:\"^.+?\\\\n[=-]{2,}$\"}]},{b:\"<\",e:\">\",sL:\"xml\",r:0},{cN:\"bullet\",b:\"^([*+-]|(\\\\d+\\\\.))\\\\s+\"},{cN:\"strong\",b:\"[*_]{2}.+?[*_]{2}\"},{cN:\"emphasis\",v:[{b:\"\\\\*.+?\\\\*\"},{b:\"_.+?_\",r:0}]},{cN:\"blockquote\",b:\"^>\\\\s+\",e:\"$\"},{cN:\"code\",v:[{b:\"`.+?`\"},{b:\"^( {4}|\t)\",e:\"$\",r:0}]},{cN:\"horizontal_rule\",b:\"^[-\\\\*]{3,}\",e:\"$\"},{b:\"\\\\[.+?\\\\][\\\\(\\\\[].*?[\\\\)\\\\]]\",rB:!0,c:[{cN:\"link_label\",b:\"\\\\[\",e:\"\\\\]\",eB:!0,rE:!0,r:0},{cN:\"link_url\",b:\"\\\\]\\\\(\",e:\"\\\\)\",eB:!0,eE:!0},{cN:\"link_reference\",b:\"\\\\]\\\\[\",e:\"\\\\]\",eB:!0,eE:!0}],r:10},{b:\"^\\\\[.+\\\\]:\",rB:!0,c:[{cN:\"link_reference\",b:\"\\\\[\",e:\"\\\\]:\",eB:!0,eE:!0,starts:{cN:\"link_url\",e:\"$\"}}]}]}});hljs.registerLanguage(\"ruby\",function(e){var c=\"[a-zA-Z_]\\\\w*[!?=]?|[-+~]\\\\@|<<|>>|=~|===?|<=>|[<>]=?|\\\\*\\\\*|[-/+%^&*~`|]|\\\\[\\\\]=?\",r=\"and false then defined module in return redo if BEGIN retry end for true self when next until do begin unless END rescue nil else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor\",b={cN:\"doctag\",b:\"@[A-Za-z]+\"},a={cN:\"value\",b:\"#<\",e:\">\"},n=[e.C(\"#\",\"$\",{c:[b]}),e.C(\"^\\\\=begin\",\"^\\\\=end\",{c:[b],r:10}),e.C(\"^__END__\",\"\\\\n$\")],s={cN:\"subst\",b:\"#\\\\{\",e:\"}\",k:r},t={cN:\"string\",c:[e.BE,s],v:[{b:/'/,e:/'/},{b:/\"/,e:/\"/},{b:/`/,e:/`/},{b:\"%[qQwWx]?\\\\(\",e:\"\\\\)\"},{b:\"%[qQwWx]?\\\\[\",e:\"\\\\]\"},{b:\"%[qQwWx]?{\",e:\"}\"},{b:\"%[qQwWx]?<\",e:\">\"},{b:\"%[qQwWx]?/\",e:\"/\"},{b:\"%[qQwWx]?%\",e:\"%\"},{b:\"%[qQwWx]?-\",e:\"-\"},{b:\"%[qQwWx]?\\\\|\",e:\"\\\\|\"},{b:/\\B\\?(\\\\\\d{1,3}|\\\\x[A-Fa-f0-9]{1,2}|\\\\u[A-Fa-f0-9]{4}|\\\\?\\S)\\b/}]},i={cN:\"params\",b:\"\\\\(\",e:\"\\\\)\",k:r},d=[t,a,{cN:\"class\",bK:\"class 
module\",e:\"$|;\",i:/=/,c:[e.inherit(e.TM,{b:\"[A-Za-z_]\\\\w*(::\\\\w+)*(\\\\?|\\\\!)?\"}),{cN:\"inheritance\",b:\"<\\\\s*\",c:[{cN:\"parent\",b:\"(\"+e.IR+\"::)?\"+e.IR}]}].concat(n)},{cN:\"function\",bK:\"def\",e:\"$|;\",c:[e.inherit(e.TM,{b:c}),i].concat(n)},{cN:\"constant\",b:\"(::)?(\\\\b[A-Z]\\\\w*(::)?)+\",r:0},{cN:\"symbol\",b:e.UIR+\"(\\\\!|\\\\?)?:\",r:0},{cN:\"symbol\",b:\":\",c:[t,{b:c}],r:0},{cN:\"number\",b:\"(\\\\b0[0-7_]+)|(\\\\b0x[0-9a-fA-F_]+)|(\\\\b[1-9][0-9_]*(\\\\.[0-9_]+)?)|[0_]\\\\b\",r:0},{cN:\"variable\",b:\"(\\\\$\\\\W)|((\\\\$|\\\\@\\\\@?)(\\\\w+))\"},{b:\"(\"+e.RSR+\")\\\\s*\",c:[a,{cN:\"regexp\",c:[e.BE,s],i:/\\n/,v:[{b:\"/\",e:\"/[a-z]*\"},{b:\"%r{\",e:\"}[a-z]*\"},{b:\"%r\\\\(\",e:\"\\\\)[a-z]*\"},{b:\"%r!\",e:\"![a-z]*\"},{b:\"%r\\\\[\",e:\"\\\\][a-z]*\"}]}].concat(n),r:0}].concat(n);s.c=d,i.c=d;var o=\"[>?]>\",l=\"[\\\\w#]+\\\\(\\\\w+\\\\):\\\\d+:\\\\d+>\",u=\"(\\\\w+-)?\\\\d+\\\\.\\\\d+\\\\.\\\\d(p\\\\d+)?[^>]+>\",N=[{b:/^\\s*=>/,cN:\"status\",starts:{e:\"$\",c:d}},{cN:\"prompt\",b:\"^(\"+o+\"|\"+l+\"|\"+u+\")\",starts:{e:\"$\",c:d}}];return{aliases:[\"rb\",\"gemspec\",\"podspec\",\"thor\",\"irb\"],k:r,c:n.concat(N).concat(d)}});hljs.registerLanguage(\"makefile\",function(e){var a={cN:\"variable\",b:/\\$\\(/,e:/\\)/,c:[e.BE]};return{aliases:[\"mk\",\"mak\"],c:[e.HCM,{b:/^\\w+\\s*\\W*=/,rB:!0,r:0,starts:{cN:\"constant\",e:/\\s*\\W*=/,eE:!0,starts:{e:/$/,r:0,c:[a]}}},{cN:\"title\",b:/^[\\w]+:\\s*$/},{cN:\"phony\",b:/^\\.PHONY:/,e:/$/,k:\".PHONY\",l:/[\\.\\w]+/},{b:/^\\t+/,e:/$/,r:0,c:[e.QSM,a]}]}});hljs.registerLanguage(\"json\",function(e){var t={literal:\"true false null\"},i=[e.QSM,e.CNM],l={cN:\"value\",e:\",\",eW:!0,eE:!0,c:i,k:t},c={b:\"{\",e:\"}\",c:[{cN:\"attribute\",b:'\\\\s*\"',e:'\"\\\\s*:\\\\s*',eB:!0,eE:!0,c:[e.BE],i:\"\\\\n\",starts:l}],i:\"\\\\S\"},n={b:\"\\\\[\",e:\"\\\\]\",c:[e.inherit(l,{cN:null})],i:\"\\\\S\"};return i.splice(i.length,0,c,n),{c:i,k:t,i:\"\\\\S\"}});hljs.registerLanguage(\"xml\",function(t){var s=\"[A-Za-z0-9\\\\._:-]+\",c={b:/<\\?(php)?(?!\\w)/,e:/\\?>/,sL:\"php\"},e={eW:!0,i:/</,r:0,c:[c,{cN:\"attribute\",b:s,r:0},{b:\"=\",r:0,c:[{cN:\"value\",c:[c],v:[{b:/\"/,e:/\"/},{b:/'/,e:/'/},{b:/[^\\s\\/>]+/}]}]}]};return{aliases:[\"html\",\"xhtml\",\"rss\",\"atom\",\"xsl\",\"plist\"],cI:!0,c:[{cN:\"doctype\",b:\"<!DOCTYPE\",e:\">\",r:10,c:[{b:\"\\\\[\",e:\"\\\\]\"}]},t.C(\"<!--\",\"-->\",{r:10}),{cN:\"cdata\",b:\"<\\\\!\\\\[CDATA\\\\[\",e:\"\\\\]\\\\]>\",r:10},{cN:\"tag\",b:\"<style(?=\\\\s|>|$)\",e:\">\",k:{title:\"style\"},c:[e],starts:{e:\"</style>\",rE:!0,sL:\"css\"}},{cN:\"tag\",b:\"<script(?=\\\\s|>|$)\",e:\">\",k:{title:\"script\"},c:[e],starts:{e:\"</script>\",rE:!0,sL:[\"actionscript\",\"javascript\",\"handlebars\"]}},c,{cN:\"pi\",b:/<\\?\\w+/,e:/\\?>/,r:10},{cN:\"tag\",b:\"</?\",e:\"/?>\",c:[{cN:\"title\",b:/[^ \\/><\\n\\t]+/,r:0},e]}]}});hljs.registerLanguage(\"css\",function(e){var c=\"[a-zA-Z-][a-zA-Z0-9_-]*\",a={cN:\"function\",b:c+\"\\\\(\",rB:!0,eE:!0,e:\"\\\\(\"},r={cN:\"rule\",b:/[A-Z\\_\\.\\-]+\\s*:/,rB:!0,e:\";\",eW:!0,c:[{cN:\"attribute\",b:/\\S/,e:\":\",eE:!0,starts:{cN:\"value\",eW:!0,eE:!0,c:[a,e.CSSNM,e.QSM,e.ASM,e.CBCM,{cN:\"hexcolor\",b:\"#[0-9A-Fa-f]+\"},{cN:\"important\",b:\"!important\"}]}}]};return{cI:!0,i:/[=\\/|'\\$]/,c:[e.CBCM,r,{cN:\"id\",b:/\\#[A-Za-z0-9_-]+/},{cN:\"class\",b:/\\.[A-Za-z0-9_-]+/},{cN:\"attr_selector\",b:/\\[/,e:/\\]/,i:\"$\"},{cN:\"pseudo\",b:/:(:)?[a-zA-Z0-9\\_\\-\\+\\(\\)\"']+/},{cN:\"at_rule\",b:\"@(font-face|page)\",l:\"[a-z-]+\",k:\"font-face 
page\"},{cN:\"at_rule\",b:\"@\",e:\"[{;]\",c:[{cN:\"keyword\",b:/\\S+/},{b:/\\s/,eW:!0,eE:!0,r:0,c:[a,e.ASM,e.QSM,e.CSSNM]}]},{cN:\"tag\",b:c,r:0},{cN:\"rules\",b:\"{\",e:\"}\",i:/\\S/,c:[e.CBCM,r]}]}});hljs.registerLanguage(\"perl\",function(e){var t=\"getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qqfileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmgetsub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedirioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when\",r={cN:\"subst\",b:\"[$@]\\\\{\",e:\"\\\\}\",k:t},s={b:\"->{\",e:\"}\"},n={cN:\"variable\",v:[{b:/\\$\\d/},{b:/[\\$%@](\\^\\w\\b|#\\w+(::\\w+)*|{\\w+}|\\w+(::\\w*)*)/},{b:/[\\$%@][^\\s\\w{]/,r:0}]},o=[e.BE,r,n],i=[n,e.HCM,e.C(\"^\\\\=\\\\w\",\"\\\\=cut\",{eW:!0}),s,{cN:\"string\",c:o,v:[{b:\"q[qwxr]?\\\\s*\\\\(\",e:\"\\\\)\",r:5},{b:\"q[qwxr]?\\\\s*\\\\[\",e:\"\\\\]\",r:5},{b:\"q[qwxr]?\\\\s*\\\\{\",e:\"\\\\}\",r:5},{b:\"q[qwxr]?\\\\s*\\\\|\",e:\"\\\\|\",r:5},{b:\"q[qwxr]?\\\\s*\\\\<\",e:\"\\\\>\",r:5},{b:\"qw\\\\s+q\",e:\"q\",r:5},{b:\"'\",e:\"'\",c:[e.BE]},{b:'\"',e:'\"'},{b:\"`\",e:\"`\",c:[e.BE]},{b:\"{\\\\w+}\",c:[],r:0},{b:\"-?\\\\w+\\\\s*\\\\=\\\\>\",c:[],r:0}]},{cN:\"number\",b:\"(\\\\b0[0-7_]+)|(\\\\b0x[0-9a-fA-F_]+)|(\\\\b[1-9][0-9_]*(\\\\.[0-9_]+)?)|[0_]\\\\b\",r:0},{b:\"(\\\\/\\\\/|\"+e.RSR+\"|\\\\b(split|return|print|reverse|grep)\\\\b)\\\\s*\",k:\"split return print reverse grep\",r:0,c:[e.HCM,{cN:\"regexp\",b:\"(s|tr|y)/(\\\\\\\\.|[^/])*/(\\\\\\\\.|[^/])*/[a-z]*\",r:10},{cN:\"regexp\",b:\"(m|qr)?/\",e:\"/[a-z]*\",c:[e.BE],r:0}]},{cN:\"sub\",bK:\"sub\",e:\"(\\\\s*\\\\(.*?\\\\))?[;{]\",r:5},{cN:\"operator\",b:\"-\\\\w\\\\b\",r:0},{b:\"^__DATA__$\",e:\"^__END__$\",sL:\"mojolicious\",c:[{b:\"^@@.*\",e:\"$\",cN:\"comment\"}]}];return r.c=i,s.c=i,{aliases:[\"pl\"],k:t,c:i}});hljs.registerLanguage(\"cs\",function(e){var r=\"abstract as base bool break byte case catch char checked const continue decimal dynamic default delegate do double else enum event explicit extern false finally fixed float for foreach goto if implicit in int interface internal is lock long null when object operator out override params private protected public readonly ref sbyte sealed short sizeof stackalloc static 
string struct switch this true try typeof uint ulong unchecked unsafe ushort using virtual volatile void while async protected public private internal ascending descending from get group into join let orderby partial select set value var where yield\",t=e.IR+\"(<\"+e.IR+\">)?\";return{aliases:[\"csharp\"],k:r,i:/::/,c:[e.C(\"///\",\"$\",{rB:!0,c:[{cN:\"xmlDocTag\",v:[{b:\"///\",r:0},{b:\"<!--|-->\"},{b:\"</?\",e:\">\"}]}]}),e.CLCM,e.CBCM,{cN:\"preprocessor\",b:\"#\",e:\"$\",k:\"if else elif endif define undef warning error line region endregion pragma checksum\"},{cN:\"string\",b:'@\"',e:'\"',c:[{b:'\"\"'}]},e.ASM,e.QSM,e.CNM,{bK:\"class interface\",e:/[{;=]/,i:/[^\\s:]/,c:[e.TM,e.CLCM,e.CBCM]},{bK:\"namespace\",e:/[{;=]/,i:/[^\\s:]/,c:[{cN:\"title\",b:\"[a-zA-Z](\\\\.?\\\\w)*\",r:0},e.CLCM,e.CBCM]},{bK:\"new return throw await\",r:0},{cN:\"function\",b:\"(\"+t+\"\\\\s+)+\"+e.IR+\"\\\\s*\\\\(\",rB:!0,e:/[{;=]/,eE:!0,k:r,c:[{b:e.IR+\"\\\\s*\\\\(\",rB:!0,c:[e.TM],r:0},{cN:\"params\",b:/\\(/,e:/\\)/,eB:!0,eE:!0,k:r,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]}]}});hljs.registerLanguage(\"apache\",function(e){var r={cN:\"number\",b:\"[\\\\$%]\\\\d+\"};return{aliases:[\"apacheconf\"],cI:!0,c:[e.HCM,{cN:\"tag\",b:\"</?\",e:\">\"},{cN:\"keyword\",b:/\\w+/,r:0,k:{common:\"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername\"},starts:{e:/$/,r:0,k:{literal:\"on off all\"},c:[{cN:\"sqbracket\",b:\"\\\\s\\\\[\",e:\"\\\\]$\"},{cN:\"cbracket\",b:\"[\\\\$%]\\\\{\",e:\"\\\\}\",c:[\"self\",r]},r,e.QSM]}}],i:/\\S/}});hljs.registerLanguage(\"http\",function(t){return{aliases:[\"https\"],i:\"\\\\S\",c:[{cN:\"status\",b:\"^HTTP/[0-9\\\\.]+\",e:\"$\",c:[{cN:\"number\",b:\"\\\\b\\\\d{3}\\\\b\"}]},{cN:\"request\",b:\"^[A-Z]+ (.*?) HTTP/[0-9\\\\.]+$\",rB:!0,e:\"$\",c:[{cN:\"string\",b:\" \",e:\" \",eB:!0,eE:!0}]},{cN:\"attribute\",b:\"^\\\\w\",e:\": \",eE:!0,i:\"\\\\n|\\\\s|=\",starts:{cN:\"string\",e:\"$\"}},{b:\"\\\\n\\\\n\",starts:{sL:[],eW:!0}}]}});hljs.registerLanguage(\"objectivec\",function(e){var t={cN:\"built_in\",b:\"(AV|CA|CF|CG|CI|MK|MP|NS|UI)\\\\w+\"},i={keyword:\"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required\",literal:\"false true FALSE TRUE nil YES NO NULL\",built_in:\"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once\"},o=/[a-zA-Z@][a-zA-Z0-9_]*/,n=\"@interface @class @protocol @implementation\";return{aliases:[\"mm\",\"objc\",\"obj-c\"],k:i,l:o,i:\"</\",c:[t,e.CLCM,e.CBCM,e.CNM,e.QSM,{cN:\"string\",v:[{b:'@\"',e:'\"',i:\"\\\\n\",c:[e.BE]},{b:\"'\",e:\"[^\\\\\\\\]'\",i:\"[^\\\\\\\\][^']\"}]},{cN:\"preprocessor\",b:\"#\",e:\"$\",c:[{cN:\"title\",v:[{b:'\"',e:'\"'},{b:\"<\",e:\">\"}]}]},{cN:\"class\",b:\"(\"+n.split(\" \").join(\"|\")+\")\\\\b\",e:\"({|$)\",eE:!0,k:n,l:o,c:[e.UTM]},{cN:\"variable\",b:\"\\\\.\"+e.UIR,r:0}]}});hljs.registerLanguage(\"python\",function(e){var r={cN:\"prompt\",b:/^(>>>|\\.\\.\\.) 
/},b={cN:\"string\",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[r],r:10},{b:/(u|b)?r?\"\"\"/,e:/\"\"\"/,c:[r],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)\"/,e:/\"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)\"/,e:/\"/},e.ASM,e.QSM]},a={cN:\"number\",r:0,v:[{b:e.BNR+\"[lLjJ]?\"},{b:\"\\\\b(0o[0-7]+)[lLjJ]?\"},{b:e.CNR+\"[lLjJ]?\"}]},l={cN:\"params\",b:/\\(/,e:/\\)/,c:[\"self\",r,a,b]};return{aliases:[\"py\",\"gyp\"],k:{keyword:\"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10 None True False\",built_in:\"Ellipsis NotImplemented\"},i:/(<\\/|->|\\?)/,c:[r,a,b,e.HCM,{v:[{cN:\"function\",bK:\"def\",r:10},{cN:\"class\",bK:\"class\"}],e:/:/,i:/[${=;\\n,]/,c:[e.UTM,l]},{cN:\"decorator\",b:/^[\\t ]*@/,e:/$/},{b:/\\b(print|exec)\\(/}]}});hljs.registerLanguage(\"java\",function(e){var a=e.UIR+\"(<\"+e.UIR+\">)?\",t=\"false synchronized int abstract float private char boolean static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private\",c=\"\\\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+)(\\\\.([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+))?|\\\\.([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+))([eE][-+]?\\\\d+)?)[lLfF]?\",r={cN:\"number\",b:c,r:0};return{aliases:[\"jsp\"],k:t,i:/<\\/|#/,c:[e.C(\"/\\\\*\\\\*\",\"\\\\*/\",{r:0,c:[{cN:\"doctag\",b:\"@[A-Za-z]+\"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:\"class\",bK:\"class interface\",e:/[{;=]/,eE:!0,k:\"class interface\",i:/[:\"\\[\\]]/,c:[{bK:\"extends implements\"},e.UTM]},{bK:\"new throw return else\",r:0},{cN:\"function\",b:\"(\"+a+\"\\\\s+)+\"+e.UIR+\"\\\\s*\\\\(\",rB:!0,e:/[{;=]/,eE:!0,k:t,c:[{b:e.UIR+\"\\\\s*\\\\(\",rB:!0,r:0,c:[e.UTM]},{cN:\"params\",b:/\\(/,e:/\\)/,k:t,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},r,{cN:\"annotation\",b:\"@[A-Za-z]+\"}]}});hljs.registerLanguage(\"bash\",function(e){var t={cN:\"variable\",v:[{b:/\\$[\\w\\d#@][\\w\\d_]*/},{b:/\\$\\{(.*?)}/}]},s={cN:\"string\",b:/\"/,e:/\"/,c:[e.BE,t,{cN:\"variable\",b:/\\$\\(/,e:/\\)/,c:[e.BE]}]},a={cN:\"string\",b:/'/,e:/'/};return{aliases:[\"sh\",\"zsh\"],l:/-?[a-z\\.]+/,k:{keyword:\"if then else elif fi for while in do done case esac function\",literal:\"true false\",built_in:\"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp\",operator:\"-ne -eq -lt -gt -f -d -e -s -l -a\"},c:[{cN:\"shebang\",b:/^#![^\\n]+sh\\s*$/,r:10},{cN:\"function\",b:/\\w[\\w\\d_]*\\s*\\(\\s*\\)\\s*\\{/,rB:!0,c:[e.inherit(e.TM,{b:/\\w[\\w\\d_]*/})],r:0},e.HCM,e.NM,s,a,t]}});hljs.registerLanguage(\"sql\",function(e){var 
t=e.C(\"--\",\"$\");return{cI:!0,i:/[<>{}*]/,c:[{cN:\"operator\",bK:\"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke\",e:/;/,eW:!0,k:{keyword:\"abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias allocate allow alter always analyze ancillary and any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound buffer_cache buffer_pool build bulk by byte byteordermark bytes c cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime customdatum cycle d data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration e each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname 
evaluate event eventdata events except exception exceptions exchange exclude excluding execu execut execute exempt exists exit exp expire explain export export_set extended extent external external_1 external_2 externally extract f failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function g general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour http i id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists k keep keep_duplicates key keys kill l language large last last_day last_insert_id last_value lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim m main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex n name name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding p package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password 
password_grace_time password_lock_time password_reuse_max password_reuse_time password_verify_function patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second section securefile security seed segment select self sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime t table tables tablespace tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unpivot unrecoverable unsafe unsigned until 
untrusted unusable unused update updated upgrade upped upper upsert url urowid usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek\",literal:\"true false null\",built_in:\"array bigint binary bit blob boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text varchar varying void\"},c:[{cN:\"string\",b:\"'\",e:\"'\",c:[e.BE,{b:\"''\"}]},{cN:\"string\",b:'\"',e:'\"',c:[e.BE,{b:'\"\"'}]},{cN:\"string\",b:\"`\",e:\"`\",c:[e.BE]},e.CNM,e.CBCM,t]},e.CBCM,t]}});hljs.registerLanguage(\"nginx\",function(e){var r={cN:\"variable\",v:[{b:/\\$\\d+/},{b:/\\$\\{/,e:/}/},{b:\"[\\\\$\\\\@]\"+e.UIR}]},b={eW:!0,l:\"[a-z/_]+\",k:{built_in:\"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll\"},r:0,i:\"=>\",c:[e.HCM,{cN:\"string\",c:[e.BE,r],v:[{b:/\"/,e:/\"/},{b:/'/,e:/'/}]},{cN:\"url\",b:\"([a-z]+):/\",e:\"\\\\s\",eW:!0,eE:!0,c:[r]},{cN:\"regexp\",c:[e.BE,r],v:[{b:\"\\\\s\\\\^\",e:\"\\\\s|{|;\",rE:!0},{b:\"~\\\\*?\\\\s+\",e:\"\\\\s|{|;\",rE:!0},{b:\"\\\\*(\\\\.[a-z\\\\-]+)+\"},{b:\"([a-z\\\\-]+\\\\.)+\\\\*\"}]},{cN:\"number\",b:\"\\\\b\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}(:\\\\d{1,5})?\\\\b\"},{cN:\"number\",b:\"\\\\b\\\\d+[kKmMgGdshdwy]*\\\\b\",r:0},r]};return{aliases:[\"nginxconf\"],c:[e.HCM,{b:e.UIR+\"\\\\s\",e:\";|{\",rB:!0,c:[{cN:\"title\",b:e.UIR,starts:b}],r:0}],i:\"[^\\\\s\\\\}]\"}});hljs.registerLanguage(\"cpp\",function(t){var e={cN:\"keyword\",b:\"\\\\b[a-z\\\\d_]*_t\\\\b\"},r={cN:\"string\",v:[t.inherit(t.QSM,{b:'((u8?|U)|L)?\"'}),{b:'(u8?|U)?R\"',e:'\"',c:[t.BE]},{b:\"'\\\\\\\\?.\",e:\"'\",i:\".\"}]},s={cN:\"number\",v:[{b:\"\\\\b(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+)(u|U|l|L|ul|UL|f|F)\"},{b:t.CNR}]},i={cN:\"preprocessor\",b:\"#\",e:\"$\",k:\"if else elif endif define undef warning error line pragma ifdef ifndef\",c:[{b:/\\\\\\n/,r:0},{bK:\"include\",e:\"$\",c:[r,{cN:\"string\",b:\"<\",e:\">\",i:\"\\\\n\"}]},r,s,t.CLCM,t.CBCM]},a=t.IR+\"\\\\s*\\\\(\",c={keyword:\"int float while private char catch export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const struct for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using class asm case typeid short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignof constexpr decltype noexcept static_assert thread_local restrict _Bool complex _Complex _Imaginary atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong\",built_in:\"std string cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set bitset multiset 
multimap unordered_set unordered_map unordered_multiset unordered_multimap array shared_ptr abort abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf\",literal:\"true false nullptr NULL\"};return{aliases:[\"c\",\"cc\",\"h\",\"c++\",\"h++\",\"hpp\"],k:c,i:\"</\",c:[e,t.CLCM,t.CBCM,s,r,i,{b:\"\\\\b(deque|list|queue|stack|vector|map|set|bitset|multiset|multimap|unordered_map|unordered_set|unordered_multiset|unordered_multimap|array)\\\\s*<\",e:\">\",k:c,c:[\"self\",e]},{b:t.IR+\"::\",k:c},{bK:\"new throw return else\",r:0},{cN:\"function\",b:\"(\"+t.IR+\"[\\\\*&\\\\s]+)+\"+a,rB:!0,e:/[{;=]/,eE:!0,k:c,i:/[^\\w\\s\\*&]/,c:[{b:a,rB:!0,c:[t.TM],r:0},{cN:\"params\",b:/\\(/,e:/\\)/,k:c,r:0,c:[t.CLCM,t.CBCM,r,s]},t.CLCM,t.CBCM,i]}]}});hljs.registerLanguage(\"php\",function(e){var c={cN:\"variable\",b:\"\\\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*\"},a={cN:\"preprocessor\",b:/<\\?(php)?|\\?>/},i={cN:\"string\",c:[e.BE,a],v:[{b:'b\"',e:'\"'},{b:\"b'\",e:\"'\"},e.inherit(e.ASM,{i:null}),e.inherit(e.QSM,{i:null})]},t={v:[e.BNM,e.CNM]};return{aliases:[\"php3\",\"php4\",\"php5\",\"php6\"],cI:!0,k:\"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require __FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally\",c:[e.CLCM,e.HCM,e.C(\"/\\\\*\",\"\\\\*/\",{c:[{cN:\"doctag\",b:\"@[A-Za-z]+\"},a]}),e.C(\"__halt_compiler.+?;\",!1,{eW:!0,k:\"__halt_compiler\",l:e.UIR}),{cN:\"string\",b:/<<<['\"]?\\w+['\"]?$/,e:/^\\w+;?$/,c:[e.BE,{cN:\"subst\",v:[{b:/\\$\\w+/},{b:/\\{\\$/,e:/\\}/}]}]},a,c,{b:/(::|->)+[a-zA-Z_\\x7f-\\xff][a-zA-Z0-9_\\x7f-\\xff]*/},{cN:\"function\",bK:\"function\",e:/[;{]/,eE:!0,i:\"\\\\$|\\\\[|%\",c:[e.UTM,{cN:\"params\",b:\"\\\\(\",e:\"\\\\)\",c:[\"self\",c,e.CBCM,i,t]}]},{cN:\"class\",bK:\"class interface\",e:\"{\",eE:!0,i:/[:\\(\\$\"]/,c:[{bK:\"extends implements\"},e.UTM]},{bK:\"namespace\",e:\";\",i:/[\\.']/,c:[e.UTM]},{bK:\"use\",e:\";\",c:[e.UTM]},{b:\"=>\"},i,t]}});hljs.registerLanguage(\"coffeescript\",function(e){var c={keyword:\"in if for while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super then unless until loop of by when and or is isnt not\",literal:\"true false null undefined yes no on off\",built_in:\"npm require console print module global window document\"},n=\"[A-Za-z$_][0-9A-Za-z$_]*\",r={cN:\"subst\",b:/#\\{/,e:/}/,k:c},t=[e.BNM,e.inherit(e.CNM,{starts:{e:\"(\\\\s*/)?\",r:0}}),{cN:\"string\",v:[{b:/'''/,e:/'''/,c:[e.BE]},{b:/'/,e:/'/,c:[e.BE]},{b:/\"\"\"/,e:/\"\"\"/,c:[e.BE,r]},{b:/\"/,e:/\"/,c:[e.BE,r]}]},{cN:\"regexp\",v:[{b:\"///\",e:\"///\",c:[r,e.HCM]},{b:\"//[gim]*\",r:0},{b:/\\/(?![ *])(\\\\\\/|.)*?\\/[gim]*(?=\\W|$)/}]},{cN:\"property\",b:\"@\"+n},{b:\"`\",e:\"`\",eB:!0,eE:!0,sL:\"javascript\"}];r.c=t;var 
s=e.inherit(e.TM,{b:n}),i=\"(\\\\(.*\\\\))?\\\\s*\\\\B[-=]>\",o={cN:\"params\",b:\"\\\\([^\\\\(]\",rB:!0,c:[{b:/\\(/,e:/\\)/,k:c,c:[\"self\"].concat(t)}]};return{aliases:[\"coffee\",\"cson\",\"iced\"],k:c,i:/\\/\\*/,c:t.concat([e.C(\"###\",\"###\"),e.HCM,{cN:\"function\",b:\"^\\\\s*\"+n+\"\\\\s*=\\\\s*\"+i,e:\"[-=]>\",rB:!0,c:[s,o]},{b:/[:\\(,=]\\s*/,r:0,c:[{cN:\"function\",b:i,e:\"[-=]>\",rB:!0,c:[o]}]},{cN:\"class\",bK:\"class\",e:\"$\",i:/[:=\"\\[\\]]/,c:[{bK:\"extends\",eW:!0,i:/[:=\"\\[\\]]/,c:[s]},s]},{cN:\"attribute\",b:n+\":\",e:\":\",rB:!0,rE:!0,r:0}])}});hljs.registerLanguage(\"javascript\",function(e){return{aliases:[\"js\"],k:{keyword:\"in of if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const export super debugger as async await\",literal:\"true false null undefined NaN Infinity\",built_in:\"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require module console window document Symbol Set Map WeakSet WeakMap Proxy Reflect Promise\"},c:[{cN:\"pi\",r:10,b:/^\\s*['\"]use (strict|asm)['\"]/},e.ASM,e.QSM,{cN:\"string\",b:\"`\",e:\"`\",c:[e.BE,{cN:\"subst\",b:\"\\\\$\\\\{\",e:\"\\\\}\"}]},e.CLCM,e.CBCM,{cN:\"number\",v:[{b:\"\\\\b(0[bB][01]+)\"},{b:\"\\\\b(0[oO][0-7]+)\"},{b:e.CNR}],r:0},{b:\"(\"+e.RSR+\"|\\\\b(case|return|throw)\\\\b)\\\\s*\",k:\"return throw case\",c:[e.CLCM,e.CBCM,e.RM,{b:/</,e:/>\\s*[);\\]]/,r:0,sL:\"xml\"}],r:0},{cN:\"function\",bK:\"function\",e:/\\{/,eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{cN:\"params\",b:/\\(/,e:/\\)/,eB:!0,eE:!0,c:[e.CLCM,e.CBCM]}],i:/\\[|%/},{b:/\\$[(.]/},{b:\"\\\\.\"+e.IR,r:0},{bK:\"import\",e:\"[;$]\",k:\"import from as\",c:[e.ASM,e.QSM]},{cN:\"class\",bK:\"class\",e:/[{;=]/,eE:!0,i:/[:\"\\[\\]]/,c:[{bK:\"extends\"},e.UTM]}],i:/#/}});hljs.registerLanguage(\"ini\",function(e){var c={cN:\"string\",c:[e.BE],v:[{b:\"'''\",e:\"'''\",r:10},{b:'\"\"\"',e:'\"\"\"',r:10},{b:'\"',e:'\"'},{b:\"'\",e:\"'\"}]};return{aliases:[\"toml\"],cI:!0,i:/\\S/,c:[e.C(\";\",\"$\"),e.HCM,{cN:\"title\",b:/^\\s*\\[+/,e:/\\]+/},{cN:\"setting\",b:/^[a-z0-9\\[\\]_-]+\\s*=\\s*/,e:\"$\",c:[{cN:\"value\",eW:!0,k:\"on off true false yes no\",c:[{cN:\"variable\",v:[{b:/\\$[\\w\\d\"][\\w\\d_]*/},{b:/\\$\\{(.*?)}/}]},c,{cN:\"number\",b:/([\\+\\-]+)?[\\d]+_[\\d_]+/},e.NM],r:0}]}]}});hljs.registerLanguage(\"diff\",function(e){return{aliases:[\"patch\"],c:[{cN:\"chunk\",r:10,v:[{b:/^@@ +\\-\\d+,\\d+ +\\+\\d+,\\d+ +@@$/},{b:/^\\*\\*\\* +\\d+,\\d+ +\\*\\*\\*\\*$/},{b:/^\\-\\-\\- +\\d+,\\d+ +\\-\\-\\-\\-$/}]},{cN:\"header\",v:[{b:/Index: /,e:/$/},{b:/=====/,e:/=====$/},{b:/^\\-\\-\\-/,e:/$/},{b:/^\\*{3} /,e:/$/},{b:/^\\+\\+\\+/,e:/$/},{b:/\\*{5}/,e:/\\*{5}$/}]},{cN:\"addition\",b:\"^\\\\+\",e:\"$\"},{cN:\"deletion\",b:\"^\\\\-\",e:\"$\"},{cN:\"change\",b:\"^\\\\!\",e:\"$\"}]}});\nexports.hljs = hljs;\n"
        },
        "$:/plugins/tiddlywiki/highlight/highlight.css": {
            "type": "text/css",
            "title": "$:/plugins/tiddlywiki/highlight/highlight.css",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "/*\n\nOriginal style from softwaremaniacs.org (c) Ivan Sagalaev <Maniac@SoftwareManiacs.Org>\n\n*/\n\n.hljs {\n  display: block;\n  overflow-x: auto;\n  padding: 0.5em;\n  background: #f0f0f0;\n  -webkit-text-size-adjust: none;\n}\n\n.hljs,\n.hljs-subst,\n.hljs-tag .hljs-title,\n.nginx .hljs-title {\n  color: black;\n}\n\n.hljs-string,\n.hljs-title,\n.hljs-constant,\n.hljs-parent,\n.hljs-tag .hljs-value,\n.hljs-rule .hljs-value,\n.hljs-preprocessor,\n.hljs-pragma,\n.hljs-name,\n.haml .hljs-symbol,\n.ruby .hljs-symbol,\n.ruby .hljs-symbol .hljs-string,\n.hljs-template_tag,\n.django .hljs-variable,\n.smalltalk .hljs-class,\n.hljs-addition,\n.hljs-flow,\n.hljs-stream,\n.bash .hljs-variable,\n.pf .hljs-variable,\n.apache .hljs-tag,\n.apache .hljs-cbracket,\n.tex .hljs-command,\n.tex .hljs-special,\n.erlang_repl .hljs-function_or_atom,\n.asciidoc .hljs-header,\n.markdown .hljs-header,\n.coffeescript .hljs-attribute,\n.tp .hljs-variable {\n  color: #800;\n}\n\n.smartquote,\n.hljs-comment,\n.hljs-annotation,\n.diff .hljs-header,\n.hljs-chunk,\n.asciidoc .hljs-blockquote,\n.markdown .hljs-blockquote {\n  color: #888;\n}\n\n.hljs-number,\n.hljs-date,\n.hljs-regexp,\n.hljs-literal,\n.hljs-hexcolor,\n.smalltalk .hljs-symbol,\n.smalltalk .hljs-char,\n.go .hljs-constant,\n.hljs-change,\n.lasso .hljs-variable,\n.makefile .hljs-variable,\n.asciidoc .hljs-bullet,\n.markdown .hljs-bullet,\n.asciidoc .hljs-link_url,\n.markdown .hljs-link_url {\n  color: #080;\n}\n\n.hljs-label,\n.ruby .hljs-string,\n.hljs-decorator,\n.hljs-filter .hljs-argument,\n.hljs-localvars,\n.hljs-array,\n.hljs-attr_selector,\n.hljs-important,\n.hljs-pseudo,\n.hljs-pi,\n.haml .hljs-bullet,\n.hljs-doctype,\n.hljs-deletion,\n.hljs-envvar,\n.hljs-shebang,\n.apache .hljs-sqbracket,\n.nginx .hljs-built_in,\n.tex .hljs-formula,\n.erlang_repl .hljs-reserved,\n.hljs-prompt,\n.asciidoc .hljs-link_label,\n.markdown .hljs-link_label,\n.vhdl .hljs-attribute,\n.clojure .hljs-attribute,\n.asciidoc .hljs-attribute,\n.lasso .hljs-attribute,\n.coffeescript .hljs-property,\n.hljs-phony {\n  color: #88f;\n}\n\n.hljs-keyword,\n.hljs-id,\n.hljs-title,\n.hljs-built_in,\n.css .hljs-tag,\n.hljs-doctag,\n.smalltalk .hljs-class,\n.hljs-winutils,\n.bash .hljs-variable,\n.pf .hljs-variable,\n.apache .hljs-tag,\n.hljs-type,\n.hljs-typename,\n.tex .hljs-command,\n.asciidoc .hljs-strong,\n.markdown .hljs-strong,\n.hljs-request,\n.hljs-status,\n.tp .hljs-data,\n.tp .hljs-io {\n  font-weight: bold;\n}\n\n.asciidoc .hljs-emphasis,\n.markdown .hljs-emphasis,\n.tp .hljs-units {\n  font-style: italic;\n}\n\n.nginx .hljs-built_in {\n  font-weight: normal;\n}\n\n.coffeescript .javascript,\n.javascript .xml,\n.lasso .markup,\n.tex .hljs-formula,\n.xml .javascript,\n.xml .vbscript,\n.xml .css,\n.xml .hljs-cdata {\n  opacity: 0.5;\n}\n"
        },
        "$:/plugins/tiddlywiki/highlight/highlightblock.js": {
            "text": "/*\\\ntitle: $:/plugins/tiddlywiki/highlight/highlightblock.js\ntype: application/javascript\nmodule-type: widget\n\nWraps up the fenced code blocks parser for highlight and use in TiddlyWiki5\n\n\\*/\n(function() {\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar CodeBlockWidget = require(\"$:/core/modules/widgets/codeblock.js\").codeblock;\n\nvar hljs = require(\"$:/plugins/tiddlywiki/highlight/highlight.js\");\n\nhljs.configure({tabReplace: \"    \"});\t\n\nCodeBlockWidget.prototype.postRender = function() {\n\tvar domNode = this.domNodes[0];\n\tif($tw.browser && this.document !== $tw.fakeDocument && this.language) {\n\t\tdomNode.className = this.language.toLowerCase();\n\t\thljs.highlightBlock(domNode);\n\t} else if(!$tw.browser && this.language && this.language.indexOf(\"/\") === -1 ){\n\t\ttry {\n\t\t\tdomNode.className = this.language.toLowerCase() + \" hljs\";\n\t\t\tdomNode.children[0].innerHTML = hljs.fixMarkup(hljs.highlight(this.language, this.getAttribute(\"code\")).value);\n\t\t}\n\t\tcatch(err) {\n\t\t\t// Can't easily tell if a language is registered or not in the packed version of hightlight.js,\n\t\t\t// so we silently fail and the codeblock remains unchanged\n\t\t}\n\t}\t\n};\n\n})();\n",
            "title": "$:/plugins/tiddlywiki/highlight/highlightblock.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/plugins/tiddlywiki/highlight/license": {
            "title": "$:/plugins/tiddlywiki/highlight/license",
            "type": "text/plain",
            "text": "Copyright (c) 2006, Ivan Sagalaev\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright\n      notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of highlight.js nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
        },
        "$:/plugins/tiddlywiki/highlight/readme": {
            "title": "$:/plugins/tiddlywiki/highlight/readme",
            "text": "This plugin provides syntax highlighting of code blocks using v8.8.0 of [[highlight.js|https://github.com/isagalaev/highlight.js]] from Ivan Sagalaev.\n\n! Usage\n\nWhen the plugin is installed it automatically applies highlighting to all codeblocks defined with triple backticks or with the CodeBlockWidget.\n\nThe language can optionally be specified after the opening triple braces:\n\n<$codeblock code=\"\"\"```css\n * { margin: 0; padding: 0; } /* micro reset */\n\nhtml { font-size: 62.5%; }\nbody { font-size: 14px; font-size: 1.4rem; } /* =14px */\nh1   { font-size: 24px; font-size: 2.4rem; } /* =24px */\n```\"\"\"/>\n\nIf no language is specified highlight.js will attempt to automatically detect the language.\n\n! Built-in Language Brushes\n\nThe plugin includes support for the following languages (referred to as \"brushes\" by highlight.js):\n\n* apache\n* bash\n* coffeescript\n* cpp\n* cs\n* css\n* diff\n* http\n* ini\n* java\n* javascript\n* json\n* makefile\n* markdown\n* nginx\n* objectivec\n* perl\n* php\n* python\n* ruby\n* sql\n* xml\n\n"
        },
        "$:/plugins/tiddlywiki/highlight/styles": {
            "title": "$:/plugins/tiddlywiki/highlight/styles",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": ".hljs{display:block;overflow-x:auto;padding:.5em;color:#333;background:#f8f8f8;-webkit-text-size-adjust:none}.hljs-comment,.diff .hljs-header,.hljs-javadoc{color:#998;font-style:italic}.hljs-keyword,.css .rule .hljs-keyword,.hljs-winutils,.nginx .hljs-title,.hljs-subst,.hljs-request,.hljs-status{color:#333;font-weight:bold}.hljs-number,.hljs-hexcolor,.ruby .hljs-constant{color:teal}.hljs-string,.hljs-tag .hljs-value,.hljs-phpdoc,.hljs-dartdoc,.tex .hljs-formula{color:#d14}.hljs-title,.hljs-id,.scss .hljs-preprocessor{color:#900;font-weight:bold}.hljs-list .hljs-keyword,.hljs-subst{font-weight:normal}.hljs-class .hljs-title,.hljs-type,.vhdl .hljs-literal,.tex .hljs-command{color:#458;font-weight:bold}.hljs-tag,.hljs-tag .hljs-title,.hljs-rule .hljs-property,.django .hljs-tag .hljs-keyword{color:navy;font-weight:normal}.hljs-attribute,.hljs-variable,.lisp .hljs-body,.hljs-name{color:teal}.hljs-regexp{color:#009926}.hljs-symbol,.ruby .hljs-symbol .hljs-string,.lisp .hljs-keyword,.clojure .hljs-keyword,.scheme .hljs-keyword,.tex .hljs-special,.hljs-prompt{color:#990073}.hljs-built_in{color:#0086b3}.hljs-preprocessor,.hljs-pragma,.hljs-pi,.hljs-doctype,.hljs-shebang,.hljs-cdata{color:#999;font-weight:bold}.hljs-deletion{background:#fdd}.hljs-addition{background:#dfd}.diff .hljs-change{background:#0086b3}.hljs-chunk{color:#aaa}"
        },
        "$:/plugins/tiddlywiki/highlight/usage": {
            "title": "$:/plugins/tiddlywiki/highlight/usage",
            "text": "! Usage\n\nFenced code blocks can have a language specifier added to trigger highlighting in a specific language. Otherwise heuristics are used to detect the language.\n\n```\n ```js\n var a = b + c; // Highlighted as JavaScript\n ```\n```\n! Adding Themes\n\nYou can add themes from highlight.js by copying the CSS to a new tiddler and tagging it with [[$:/tags/Stylesheet]]. The available themes can be found on GitHub:\n\nhttps://github.com/isagalaev/highlight.js/tree/master/src/styles\n"
        }
    }
}
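For reference, a minimal usage sketch for the highlight plugin packaged above (the CSS rule itself is illustrative only, not part of the plugin): a fenced code block whose opening triple backticks are followed by a language name is highlighted in that language, and leaving the name off lets highlight.js try to auto-detect it, as described in the plugin's readme and usage tiddlers. The same highlighting is applied to blocks created directly with the CodeBlockWidget.

```css
/* With the plugin installed, this fenced block is parsed into a codeblock
   widget and highlighted as CSS; the rule below is just example content. */
h1 { font-size: 24px; }
```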
{
    "tiddlers": {
        "$:/plugins/wimmoermans/history/fhistory.js": {
            "created": "20160511174147745",
            "creator": "wjam",
            "text": "/*\\\ntitle: $:/plugins/wimmoermans/fhistory.js\ntype: application/javascript\nmodule-type: filteroperator\n\na filter to generate ALL tiddler titles from historylist, \nrepairs escaped characters \" \\\n\nassumptions format of historylist \n  \"title\":\\s\"(.*)\"  where .* is the title of the visited tiddler\n\n@preserve\n\\*/\n\n (function() {\n        \"use strict\";\n        exports.fullhistory = function(e, t, i) {\n           var    o = [],\n                    match=\"\",\n                    regexp= \"\",\n                    ttt=\"\",\n                    text=\"\";\n            regexp = new RegExp(\"\\\"title\\\": \\\"(.+)\\\"\", \"ig\");\n            text = $tw.wiki.getTiddlerText(\"$:/HistoryList\");\n            while (match = regexp.exec(text)) {\n                ttt=match[1].replace(/\\\\\\\"/g,\"\\\"\");\n                ttt=ttt.replace(/\\\\\\\\/g,\"\\\\\");\n                o.push(ttt); /* oldest first */\n            }; /*while*/\n            return o;\n        }; /* export */\n\n }   )();",
            "type": "application/javascript",
            "title": "$:/plugins/wimmoermans/history/fhistory.js",
            "tags": "historyplugin",
            "module-type": "filteroperator",
            "modifier": "wjam",
            "modified": "20160513184814825"
        },
        "$:/plugins/wimmoermans/history/history.js": {
            "created": "20160505064231013",
            "creator": "Wim Moermans",
            "text": "/*\\\ntitle: $:/plugins/wimmoermans/history.js\ntype: application/javascript\nmodule-type: filteroperator\n\na filter to generate tiddler titles from historylist, reverse order, no duplicates (only most recent), no drafts.\n\nassumptions\n  \"title\":\\s\"(.*)\"  where .* is the title\n  \"Draft of '\" is the prefix for tiddler in edit mode\n\n@preserve\n\\*/\n\n (function() {\n        \"use strict\";\n        exports.history = function(e, t, i) {\n           var results = new Array(),\n                    o = [],\n                    match=\"\",\n                    regexp= \"\",\n                    text=\"\",\nttt=\"\",\n                    i=0,\n                    j=0,\n                    entries=0,\n                    found=0;\n            regexp = new RegExp(\"\\\"title\\\": \\\"(.+)\\\"\", \"ig\");\n            text = $tw.wiki.getTiddlerText(\"$:/HistoryList\");\n            while (match = regexp.exec(text)) {\n                ttt=match[1].replace(/\\\\\\\"/g,\"\\\"\");\n                ttt=ttt.replace(/\\\\\\\\/g,\"\\\\\");\n                if (ttt.substr(0, 10) !== \"Draft of '\") {\n                    results.push(ttt); /* oldest first */\n                    entries = entries + 1;\n                }\n            }\n            i = entries-1;\n            while (i >= 0) {\n                j = i + 1;\n                found = 0;\n                while ((j <= entries) && (found === 0)) {\n                    if (results[i] === results[j]) {\n                        found = 1;\n                    }\n                    j = j + 1;\n                }\n                if (found === 0) {\n\n                    if( results[i] !== \"\"){\n                         o.push(results[i]);\n                    }\n                }\n                i = i - 1;\n            };\n            return o;\n        }\n\n }   )();",
            "type": "application/javascript",
            "title": "$:/plugins/wimmoermans/history/history.js",
            "tags": "historyplugin",
            "module-type": "filteroperator",
            "modifier": "wjam",
            "modified": "20160513175106215"
        },
        "$:/plugins/wimmoermans/history/HistoryTab": {
            "created": "20160504135142490",
            "creator": "Wim Moermans",
            "text": "<small>breadcrumbs:</small>\n\n{{{ [history[]] }}}\n\n\n",
            "title": "$:/plugins/wimmoermans/history/HistoryTab",
            "tags": "$:/tags/SideBar historyplugin",
            "modifier": "wjam",
            "modified": "20160507201121730",
            "caption": "History"
        },
        "$:/plugins/wimmoermans/history/HistoryTab2": {
            "text": "<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Shadows/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n{{{ [history[]search{$:/temp/advancedsearch}limit[26]] }}}\n",
            "title": "$:/plugins/wimmoermans/history/HistoryTab2",
            "tags": "$:/tags/AdvancedSearch historyplugin",
            "modifier": "wjam",
            "modified": "20160507171948465",
            "creator": "Wim Moermans",
            "created": "20160505094007336",
            "caption": "History2"
        },
        "$:/plugins/wimmoermans/history/icon": {
            "created": "20160508110003253",
            "title": "$:/plugins/wimmoermans/history/icon",
            "type": "image/svg+xml",
            "text": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<!-- Created with Inkscape (http://www.inkscape.org/) -->\n\n<svg\n   xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n   xmlns:cc=\"http://creativecommons.org/ns#\"\n   xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n   xmlns:svg=\"http://www.w3.org/2000/svg\"\n   xmlns=\"http://www.w3.org/2000/svg\"\n   xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n   xmlns:sodipodi=\"http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd\"\n   xmlns:inkscape=\"http://www.inkscape.org/namespaces/inkscape\"\n   width=\"22\"\n   height=\"21\"\n   id=\"svg4046\"\n   version=\"1.1\"\n   inkscape:version=\"0.47 r22583\"\n   sodipodi:docname=\"cat backtrack red sign 22x21.svg\">\n  <defs\n     id=\"defs4048\">\n    <linearGradient\n       inkscape:collect=\"always\"\n       id=\"linearGradient3600\">\n      <stop\n         style=\"stop-color:#ff0024;stop-opacity:1;\"\n         offset=\"0\"\n         id=\"stop3602\" />\n      <stop\n         style=\"stop-color:#ff0024;stop-opacity:0;\"\n         offset=\"1\"\n         id=\"stop3604\" />\n    </linearGradient>\n    <inkscape:perspective\n       sodipodi:type=\"inkscape:persp3d\"\n       inkscape:vp_x=\"0 : 526.18109 : 1\"\n       inkscape:vp_y=\"0 : 1000 : 0\"\n       inkscape:vp_z=\"744.09448 : 526.18109 : 1\"\n       inkscape:persp3d-origin=\"372.04724 : 350.78739 : 1\"\n       id=\"perspective4054\" />\n    <inkscape:perspective\n       id=\"perspective4064\"\n       inkscape:persp3d-origin=\"0.5 : 0.33333333 : 1\"\n       inkscape:vp_z=\"1 : 0.5 : 1\"\n       inkscape:vp_y=\"0 : 1000 : 0\"\n       inkscape:vp_x=\"0 : 0.5 : 1\"\n       sodipodi:type=\"inkscape:persp3d\" />\n    <linearGradient\n       inkscape:collect=\"always\"\n       xlink:href=\"#linearGradient3600\"\n       id=\"linearGradient3606\"\n       x1=\"-17.230652\"\n       y1=\"4.6165885\"\n       x2=\"-3.4143419\"\n       y2=\"4.6165885\"\n       gradientUnits=\"userSpaceOnUse\" />\n  </defs>\n  <sodipodi:namedview\n     id=\"base\"\n     pagecolor=\"#ffffff\"\n     bordercolor=\"#666666\"\n     borderopacity=\"1.0\"\n     inkscape:pageopacity=\"0.0\"\n     inkscape:pageshadow=\"2\"\n     inkscape:zoom=\"31.678384\"\n     inkscape:cx=\"9.633971\"\n     inkscape:cy=\"9.3724875\"\n     inkscape:document-units=\"px\"\n     inkscape:current-layer=\"layer1\"\n     showgrid=\"false\"\n     inkscape:window-width=\"1690\"\n     inkscape:window-height=\"1181\"\n     inkscape:window-x=\"-5\"\n     inkscape:window-y=\"-5\"\n     inkscape:window-maximized=\"1\" />\n  <metadata\n     id=\"metadata4051\">\n    <rdf:RDF>\n      <cc:Work\n         rdf:about=\"\">\n        <dc:format>image/svg+xml</dc:format>\n        <dc:type\n           rdf:resource=\"http://purl.org/dc/dcmitype/StillImage\" />\n        <dc:title></dc:title>\n      </cc:Work>\n    </rdf:RDF>\n  </metadata>\n  <g\n     inkscape:label=\"Layer 1\"\n     inkscape:groupmode=\"layer\"\n     id=\"layer1\"\n     transform=\"translate(-12.564828,-228.71506)\">\n    <path\n       sodipodi:type=\"arc\"\n       style=\"fill:#fcfcfc;fill-opacity:1;fill-rule:nonzero\"\n       id=\"path6042\"\n       sodipodi:cx=\"-1.4836615\"\n       sodipodi:cy=\"-1.6968651\"\n       sodipodi:rx=\"6.976366\"\n       sodipodi:ry=\"6.8500967\"\n       d=\"m 5.4927045,-1.6968651 a 6.976366,6.8500967 0 1 1 -13.9527321,0 6.976366,6.8500967 0 1 1 13.9527321,0 z\"\n       transform=\"matrix(1.2556561,0,0,1.2788018,25.334287,241.26263)\" />\n    <path\n       id=\"path6044\"\n       d=\"m 
30.4446,244.31397 c 0.310834,-0.28767 0.606444,-0.65004 0.656841,-0.80533 0.226308,-0.69733 -1.75153,-1.35182 -2.563323,-0.84824 -0.640438,0.39727 -1.154161,1.973 -0.807158,2.47583 0.257232,0.37275 0.420332,0.39322 1.137559,0.14288 0.460496,-0.16076 0.876334,-0.32601 0.924074,-0.36721 0.04729,-0.042 0.341159,-0.31027 0.65198,-0.59797 l 2.7e-5,4e-5 z m 0.597108,-2.74293 c 0.09612,-0.164 0.0099,-0.46244 -0.199577,-0.69068 -0.46117,-0.50252 -1.166755,-0.22586 -1.371622,0.53779 -0.138492,0.51626 -0.124003,0.53781 0.418457,0.62237 0.608375,0.0949 0.889192,-0.0195 1.152742,-0.46948 z m -3.686825,2.07878 c 0.168572,-0.62841 -0.06485,-0.93373 -0.745912,-0.97577 -0.770729,-0.0477 -1.241044,0.64384 -0.836604,1.22992 0.512291,0.74232 1.35136,0.60756 1.582532,-0.25415 l -1.6e-5,0 z m 1.462533,-2.12446 0.185272,-0.64054 -0.625699,-0.0677 c -0.706134,-0.0764 -0.924717,0.0207 -1.305369,0.57977 -0.335314,0.49243 -0.04392,0.93382 0.644496,0.97629 0.707662,0.0437 0.882331,-0.0906 1.101289,-0.84784 l 1.1e-5,-4e-5 z m -7.797022,1.15185 c 0.76937,-0.85185 0.741916,-1.28981 -0.106461,-1.69843 -0.998166,-0.48078 -1.914981,-0.37475 -2.454339,0.28389 -0.516439,0.63069 -0.583894,1.63687 -0.151704,2.26314 0.51397,0.74476 1.572512,0.41361 2.712495,-0.8486 z m -3.48099,-0.42697 c 0.0896,-0.69621 -0.04686,-0.87565 -0.696238,-0.91572 -1.139297,-0.0703 -1.566432,0.84984 -0.702808,1.51406 0.586816,0.4513 1.303444,0.14483 1.399073,-0.59834 l -2.7e-5,0 z m 3.354628,-2.52461 c 0.149115,-0.45951 -0.275478,-0.99883 -0.833864,-1.05921 -0.603977,-0.0653 -0.7421,0.0289 -0.89905,0.61367 -0.166828,0.62185 0.06374,0.9337 0.720441,0.97418 0.628634,0.0389 0.868921,-0.0867 1.012367,-0.52882 l 1.06e-4,1.8e-4 z m -2.408088,0.34458 c 0.112063,-0.75445 -0.0033,-0.89128 -0.721233,-0.85538 -0.828289,0.0413 -1.07332,0.23945 -1.137099,0.9192 -0.05268,0.56122 -0.02343,0.59189 0.653277,0.68515 0.878304,0.12109 1.095906,-0.0141 1.204881,-0.74921 l 1.74e-4,2.4e-4 z m 5.888163,-5.33851 c 0.142599,-0.43933 -0.245444,-0.96317 -1.034761,-1.39674 -0.659415,-0.36226 -1.526134,-0.27635 -1.956444,0.1939 -0.468183,0.51161 -0.852424,1.97658 -0.610417,2.32725 0.48829,0.70756 3.291025,-0.16736 3.601586,-1.12433 l 3.6e-5,-8e-5 z m 0.05327,-2.11052 c 0.567019,-0.52796 -0.337185,-1.89786 -1.117088,-1.69249 -0.480085,0.12648 -0.794832,1.02942 -0.505121,1.44923 0.309844,0.44897 1.249847,0.58994 1.622222,0.24325 l -1.3e-5,1e-5 z m -3.840095,1.12289 c 0.05032,-0.53627 0.0115,-0.59251 -0.526932,-0.76354 -0.319703,-0.10149 -0.703975,-0.10859 -0.853942,-0.0154 -0.412123,0.25566 -0.580704,0.98457 -0.316321,1.36768 0.511143,0.74066 1.608153,0.36021 1.697198,-0.58862 l -3e-6,-7e-5 z m 1.399399,-1.72835 c 0.13752,-0.4755 0.08353,-0.73271 -0.201357,-0.9592 -0.777497,-0.6182 -2.043348,0.0734 -1.830727,1.00011 0.08032,0.34992 1.408324,0.87902 1.720388,0.68544 0.06804,-0.0423 0.208269,-0.3691 0.311631,-0.72643 l 6.5e-5,8e-5 z\"\n       style=\"fill:#000000\"\n       sodipodi:nodetypes=\"cssssscccsssccsscccccsscccsssccsscccssscccssscccssscccsscccssscccssscc\" />\n  </g>\n</svg>\n",
            "modified": "20160508110047926"
        },
        "$:/plugins/wimmoermans/history/readme": {
            "created": "20160505113313287",
            "creator": "wjam",
            "text": "!!history filter\nTom Tumb (Dutch: Klein Duimpje).used breadcrumbs because he didn't want to get lost in the woods. \n\nWhen reading or editing a large ~TiddlyWiki you sometimes get lost and revisit tiddlers over and over.  This plugin ''automatically creates a list of all visited tiddlers'', and allows you to answer questions like \"Where did I read that?\", \"Did I update tiddler XXYY already?\", \"Which system tiddlers did I view/edit?\" \"Which tiddlers did I rename/delete?\". \n\n!!functionality \n\n*The ''plugin/filter'' generates the tiddlers which you visited since loading the ~TiddlyWiki. \n*Like  ~OpenTab all* tiddlers from the story river are shown in the history. When you ''close a tiddler'' it is removed from the ~OpenTab but is ''still present in the the history''. \n*Tiddler titles which were opened using tiddlers like $:/DefaultTiddlers are not present in the history.\n*Like  ~RecentTab, the history list includes the tiddlers you created or modified during this session. When you ''delete or rename'' a tiddler during your session the old tiddler title will be in ''//italics//''. \n\ncompare ''history[]  and ''fullhistory[]\n\n| |''history[]''|fullhistory[]|\n| most recent visited tiddlers|''most recent visited appear first''|most recent appear last|\n| Draft titles|''drafts not included ''|all drafts ^^dangerous[1]^^|\n| visited multiple times|''no duplicates, only most recent title''|includes all duplicates|\n| usage|normal use|advanced use only|\n\n!!examples\n\nTo display all visited tiddlers so far use\n\n ``{{{[history[]]}}}`` \n\nYou can sort the list alphabetically, ''search'' the tiddlers and ''limit'' the number of results to 25. e.g.\n\n``{{{[history[]search{$:/temp/search}limit[25]]}}}``\n\nif you want to exclude system tiddlers from the list:\n\n``{{{[history[]!is[system]]}}}``\n\nIf you want modified but possibly not yet saved tiddlers (incl renamed, deleted but excluding Draft. \n\n``{{{[history[]haschanged[]]}}}``\n\n''fullhistory[]'' is only included for //advanced users//. To generate the same list as history[] you would have to write \n``{{{[fullhistory[]!prefix[Draft]reverse[]unique[]]}}}``  ^^[2]^^\n\n!!how to install \n\n''Drag'' the link $:/plugins/wimmoermans/history to your wiki, ''import'' the tiddler and ''save'' your wiki, then ''LOAD'' the newly saved wiki.\nOr ''open'' the history tiddler in this wiki and use the top right icon ''V'', ''export tiddler'', ''JSON file'' to save the tiddler to disk, then in your wiki in the sidebar use ''Tools'',  ''import (paperclip)'' to import the JSON file you just saved, ''save'' your wiki, ''LOAD'' the saved wiki.\n\n# history filter <br>[[$:/plugins/wimmoermans/history/history.js]]\n\n#fullhistory filter <br>[[$:/plugins/wimmoermans/history/fhistory.js]]\n\n#History tab in the Sidebar.<br>[[$:/plugins/wimmoermans/history/HistoryTab]]<br><small>(to disable remove the ~$:/tags/SideBar tag)</small>\n# History2 tab for advanced seard tiddler <br>[[$:/plugins/wimmoermans/history/HistoryTab2]]<br><small>(to disable remove the ~$:/tags/AdvancedSearch tag)</small>\n#$:/plugins/wimmoermans/history/readme this tiddler\n# $:/plugins/wimmoermans/history/icon three cat paw prints (by Motovun ?)\n\n!!Google plus forum to discuss the history filters\nhttps://groups.google.com/forum/#!topic/tiddlywiki/u4lN-olqnPc\n\n\n!! ~TiddlyWiki version compatibility [3]\nhistory and fullhistory were tested on version 5.1.12 pre-release, 5.1.11, 5.1.9, 5.0.8-beta. 
For 5.0.8-beta the tab-example tiddlers require manually adding the field named 'caption' value 'History' and 'History2' to present the Tab captions.\n\n!!notes/warning\n[1] clicking on ''Draft'' titles in the history is ''dangerous'' especially when the tiddler is already open.\n\n[2] ''unique[]'' is a undocumented filter present in ~TiddlyWiki boot.js.\n\n[3] history scan the $:/HistoryList tiddler for \"title\"://single space//\"//tiddler title//\"  and displays the //tiddler title// value. It correctly handles double quote and backslahs in tiddler titles.\n",
            "title": "$:/plugins/wimmoermans/history/readme",
            "tags": "historyplugin sh",
            "modifier": "wjam",
            "modified": "20160514063831746"
        }
    }
}
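For readers who want to adapt the plugin, the shape shared by history.js and fhistory.js above is a plain filter-operator module: an immediately invoked function that assigns a function to `exports`, receives the incoming title source, the operator, and an options object, and returns an array of titles. The sketch below is illustrative only; the operator name `lasttitle` and its behaviour (returning just the most recently visited title) are assumptions, and it omits the escaped-quote/backslash repair the real operators perform. A tiddler holding it would need `type` set to `application/javascript` and `module-type` set to `filteroperator`, as in the plugin tiddlers above.

```js
/*\
Illustrative sketch of a filteroperator module in the same shape as
$:/plugins/wimmoermans/history/history.js. The operator name "lasttitle"
is a made-up example.
\*/
(function() {
	"use strict";
	exports.lasttitle = function(source, operator, options) {
		var results = [],
			text = $tw.wiki.getTiddlerText("$:/HistoryList") || "",
			regexp = /"title": "(.+)"/ig,
			match;
		// Scan $:/HistoryList the same way the plugin's operators do,
		// but keep only the last (most recently visited) title found.
		while ((match = regexp.exec(text)) !== null) {
			results = [match[1]];
		}
		return results; // filter operators return an array of titles
	};
})();
```

In wikitext such an operator would then be used the same way as the readme's examples, e.g. `{{{ [lasttitle[]] }}}`.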
<small>breadcrumbs:</small>

{{{ [history[]] }}}


\rules except wikilink
Proceedings of INTERSPEECH 2020
\rules except wikilink
INTERSPEECH 2020
$:/core/ui/MoreSideBar/Missing
$:/core/ui/SideBar/More

{
    "tiddlers": {
        "$:/info/browser": {
            "title": "$:/info/browser",
            "text": "yes"
        },
        "$:/info/node": {
            "title": "$:/info/node",
            "text": "no"
        }
    }
}

{
    "tiddlers": {
        "$:/themes/tiddlywiki/snowwhite/base": {
            "title": "$:/themes/tiddlywiki/snowwhite/base",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "\\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline\n\n.tc-sidebar-header {\n\ttext-shadow: 0 1px 0 <<colour sidebar-foreground-shadow>>;\n}\n\n.tc-tiddler-info {\n\t<<box-shadow \"inset 1px 2px 3px rgba(0,0,0,0.1)\">>\n}\n\n@media screen {\n\t.tc-tiddler-frame {\n\t\t<<box-shadow \"1px 1px 5px rgba(0, 0, 0, 0.3)\">>\n\t}\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\t<<box-shadow none>>\n\t}\n}\n\n.tc-page-controls button svg, .tc-tiddler-controls button svg, .tc-topbar button svg {\n\t<<transition \"fill 150ms ease-in-out\">>\n}\n\n.tc-tiddler-controls button.tc-selected,\n.tc-page-controls button.tc-selected {\n\t<<filter \"drop-shadow(0px -1px 2px rgba(0,0,0,0.25))\">>\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor {\n\t<<box-shadow \"inset 0 1px 8px rgba(0, 0, 0, 0.15)\">>\n}\n\n.tc-edit-tags {\n\t<<box-shadow \"inset 0 1px 8px rgba(0, 0, 0, 0.15)\">>\n}\n\n.tc-tiddler-frame .tc-edit-tags input.tc-edit-texteditor {\n\t<<box-shadow \"none\">>\n\tborder: none;\n\toutline: none;\n}\n\ncanvas.tc-edit-bitmapeditor  {\n\t<<box-shadow \"2px 2px 5px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-drop-down {\n\tborder-radius: 4px;\n\t<<box-shadow \"2px 2px 10px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-block-dropdown {\n\tborder-radius: 4px;\n\t<<box-shadow \"2px 2px 10px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-modal {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.3)\">>\n}\n\n.tc-modal-footer {\n\tborder-radius: 0 0 6px 6px;\n\t<<box-shadow \"inset 0 1px 0 #fff\">>;\n}\n\n\n.tc-alert {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.6)\">>\n}\n\n.tc-notification {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.3)\">>\n\ttext-shadow: 0 1px 0 rgba(255,255,255, 0.8);\n}\n\n.tc-sidebar-lists .tc-tab-set .tc-tab-divider {\n\tborder-top: none;\n\theight: 1px;\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.15) 0%, rgba(0,0,0,0.0) 100%\">>\n}\n\n.tc-more-sidebar .tc-tab-buttons button {\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.01) 0%, rgba(0,0,0,0.1) 100%\">>\n}\n\n.tc-more-sidebar .tc-tab-buttons button.tc-tab-selected {\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.05) 0%, rgba(255,255,255,0.05) 100%\">>\n}\n\n.tc-message-box img {\n\t<<box-shadow \"1px 1px 3px rgba(0,0,0,0.5)\">>\n}\n\n.tc-plugin-info {\n\t<<box-shadow \"1px 1px 3px rgba(0,0,0,0.5)\">>\n}\n"
        }
    }
}
{
    "tiddlers": {
        "$:/themes/tiddlywiki/vanilla/base": {
            "title": "$:/themes/tiddlywiki/vanilla/base",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "\\define custom-background-datauri()\n<$set name=\"background\" value={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}}>\n<$list filter=\"[<background>is[image]]\">\n`background: url(`\n<$list filter=\"[<background>!has[_canonical_uri]]\">\n<$macrocall $name=\"datauri\" title={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}}/>\n</$list>\n<$list filter=\"[<background>has[_canonical_uri]]\">\n<$view tiddler={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}} field=\"_canonical_uri\"/>\n</$list>\n`) center center;`\n`background-attachment: `{{$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment}}`;\n-webkit-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\n-moz-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\n-o-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\nbackground-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;`\n</$list>\n</$set>\n\\end\n\n\\define if-fluid-fixed(text,hiddenSidebarText)\n<$reveal state=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\" type=\"match\" text=\"fluid-fixed\">\n$text$\n<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"yes\" default=\"yes\">\n$hiddenSidebarText$\n</$reveal>\n</$reveal>\n\\end\n\n\\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline macrocallblock\n\n/*\n** Start with the normalize CSS reset, and then belay some of its effects\n*/\n\n{{$:/themes/tiddlywiki/vanilla/reset}}\n\n*, input[type=\"search\"] {\n\tbox-sizing: border-box;\n\t-moz-box-sizing: border-box;\n\t-webkit-box-sizing: border-box;\n}\n\nhtml button {\n\tline-height: 1.2;\n\tcolor: <<colour button-foreground>>;\n\tbackground: <<colour button-background>>;\n\tborder-color: <<colour button-border>>;\n}\n\n/*\n** Basic element styles\n*/\n\nhtml {\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/fontfamily}};\n\ttext-rendering: optimizeLegibility; /* Enables kerning and ligatures etc. 
*/\n\t-webkit-font-smoothing: antialiased;\n\t-moz-osx-font-smoothing: grayscale;\n}\n\nhtml:-webkit-full-screen {\n\tbackground-color: <<colour page-background>>;\n}\n\nbody.tc-body {\n\tfont-size: {{$:/themes/tiddlywiki/vanilla/metrics/fontsize}};\n\tline-height: {{$:/themes/tiddlywiki/vanilla/metrics/lineheight}};\n\tcolor: <<colour foreground>>;\n\tbackground-color: <<colour page-background>>;\n\tfill: <<colour foreground>>;\n\tword-wrap: break-word;\n\t<<custom-background-datauri>>\n}\n\nh1, h2, h3, h4, h5, h6 {\n\tline-height: 1.2;\n\tfont-weight: 300;\n}\n\npre {\n\tdisplay: block;\n\tpadding: 14px;\n\tmargin-top: 1em;\n\tmargin-bottom: 1em;\n\tword-break: normal;\n\tword-wrap: break-word;\n\twhite-space: {{$:/themes/tiddlywiki/vanilla/options/codewrapping}};\n\tbackground-color: <<colour pre-background>>;\n\tborder: 1px solid <<colour pre-border>>;\n\tpadding: 0 3px 2px;\n\tborder-radius: 3px;\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/codefontfamily}};\n}\n\ncode {\n\tcolor: <<colour code-foreground>>;\n\tbackground-color: <<colour code-background>>;\n\tborder: 1px solid <<colour code-border>>;\n\twhite-space: {{$:/themes/tiddlywiki/vanilla/options/codewrapping}};\n\tpadding: 0 3px 2px;\n\tborder-radius: 3px;\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/codefontfamily}};\n}\n\nblockquote {\n\tborder-left: 5px solid <<colour blockquote-bar>>;\n\tmargin-left: 25px;\n\tpadding-left: 10px;\n}\n\ndl dt {\n\tfont-weight: bold;\n\tmargin-top: 6px;\n}\n\ntextarea,\ninput[type=text],\ninput[type=search],\ninput[type=\"\"],\ninput:not([type]) {\n\tcolor: <<colour foreground>>;\n\tbackground: <<colour background>>;\n}\n\n.tc-muted {\n\tcolor: <<colour muted-foreground>>;\n}\n\nsvg.tc-image-button {\n\tpadding: 0px 1px 1px 0px;\n}\n\nkbd {\n\tdisplay: inline-block;\n\tpadding: 3px 5px;\n\tfont-size: 0.8em;\n\tline-height: 1.2;\n\tcolor: <<colour foreground>>;\n\tvertical-align: middle;\n\tbackground-color: <<colour background>>;\n\tborder: solid 1px <<colour muted-foreground>>;\n\tborder-bottom-color: <<colour muted-foreground>>;\n\tborder-radius: 3px;\n\tbox-shadow: inset 0 -1px 0 <<colour muted-foreground>>;\n}\n\n/*\nMarkdown likes putting code elements inside pre elements\n*/\npre > code {\n\tpadding: 0;\n\tborder: none;\n\tbackground-color: inherit;\n\tcolor: inherit;\n}\n\ntable {\n\tborder: 1px solid <<colour table-border>>;\n\twidth: auto;\n\tmax-width: 100%;\n\tcaption-side: bottom;\n\tmargin-top: 1em;\n\tmargin-bottom: 1em;\n}\n\ntable th, table td {\n\tpadding: 0 7px 0 7px;\n\tborder-top: 1px solid <<colour table-border>>;\n\tborder-left: 1px solid <<colour table-border>>;\n}\n\ntable thead tr td, table th {\n\tbackground-color: <<colour table-header-background>>;\n\tfont-weight: bold;\n}\n\ntable tfoot tr td {\n\tbackground-color: <<colour table-footer-background>>;\n}\n\n.tc-csv-table {\n\twhite-space: nowrap;\n}\n\n.tc-tiddler-frame img,\n.tc-tiddler-frame svg,\n.tc-tiddler-frame canvas,\n.tc-tiddler-frame embed,\n.tc-tiddler-frame iframe {\n\tmax-width: 100%;\n}\n\n.tc-tiddler-body > embed,\n.tc-tiddler-body > iframe {\n\twidth: 100%;\n\theight: 600px;\n}\n\n/*\n** Links\n*/\n\nbutton.tc-tiddlylink,\na.tc-tiddlylink {\n\ttext-decoration: none;\n\tfont-weight: normal;\n\tcolor: <<colour tiddler-link-foreground>>;\n\t-webkit-user-select: inherit; /* Otherwise the draggable attribute makes links impossible to select */\n}\n\n.tc-sidebar-lists a.tc-tiddlylink {\n\tcolor: <<colour sidebar-tiddler-link-foreground>>;\n}\n\n.tc-sidebar-lists 
a.tc-tiddlylink:hover {\n\tcolor: <<colour sidebar-tiddler-link-foreground-hover>>;\n}\n\nbutton.tc-tiddlylink:hover,\na.tc-tiddlylink:hover {\n\ttext-decoration: underline;\n}\n\na.tc-tiddlylink-resolves {\n}\n\na.tc-tiddlylink-shadow {\n\tfont-weight: bold;\n}\n\na.tc-tiddlylink-shadow.tc-tiddlylink-resolves {\n\tfont-weight: normal;\n}\n\na.tc-tiddlylink-missing {\n\tfont-style: italic;\n}\n\na.tc-tiddlylink-external {\n\ttext-decoration: underline;\n\tcolor: <<colour external-link-foreground>>;\n\tbackground-color: <<colour external-link-background>>;\n}\n\na.tc-tiddlylink-external:visited {\n\tcolor: <<colour external-link-foreground-visited>>;\n\tbackground-color: <<colour external-link-background-visited>>;\n}\n\na.tc-tiddlylink-external:hover {\n\tcolor: <<colour external-link-foreground-hover>>;\n\tbackground-color: <<colour external-link-background-hover>>;\n}\n\n/*\n** Drag and drop styles\n*/\n\n.tc-tiddler-dragger {\n\tposition: relative;\n\tz-index: -10000;\n}\n\n.tc-tiddler-dragger-inner {\n\tposition: absolute;\n\tdisplay: inline-block;\n\tpadding: 8px 20px;\n\tfont-size: 16.9px;\n\tfont-weight: bold;\n\tline-height: 20px;\n\tcolor: <<colour dragger-foreground>>;\n\ttext-shadow: 0 1px 0 rgba(0, 0, 0, 1);\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n\tbackground-color: <<colour dragger-background>>;\n\tborder-radius: 20px;\n}\n\n.tc-tiddler-dragger-cover {\n\tposition: absolute;\n\tbackground-color: <<colour page-background>>;\n}\n\n.tc-dropzone {\n\tposition: relative;\n}\n\n.tc-dropzone.tc-dragover:before {\n\tz-index: 10000;\n\tdisplay: block;\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbackground: <<colour dropzone-background>>;\n\ttext-align: center;\n\tcontent: \"<<lingo DropMessage>>\";\n}\n\n/*\n** Plugin reload warning\n*/\n\n.tc-plugin-reload-warning {\n\tz-index: 1000;\n\tdisplay: block;\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbackground: <<colour alert-background>>;\n\ttext-align: center;\n}\n\n/*\n** Buttons\n*/\n\nbutton svg, button img, label svg, label img {\n\tvertical-align: middle;\n}\n\n.tc-btn-invisible {\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n}\n\n.tc-btn-boxed {\n\tfont-size: 0.6em;\n\tpadding: 0.2em;\n\tmargin: 1px;\n\tbackground: none;\n\tborder: 1px solid <<colour tiddler-controls-foreground>>;\n\tborder-radius: 0.25em;\n}\n\nhtml body.tc-body .tc-btn-boxed svg {\n\tfont-size: 1.6666em;\n}\n\n.tc-btn-boxed:hover {\n\tbackground: <<colour muted-foreground>>;\n\tcolor: <<colour background>>;\n}\n\nhtml body.tc-body .tc-btn-boxed:hover svg {\n\tfill: <<colour background>>;\n}\n\n.tc-btn-rounded {\n\tfont-size: 0.5em;\n\tline-height: 2;\n\tpadding: 0em 0.3em 0.2em 0.4em;\n\tmargin: 1px;\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground: <<colour muted-foreground>>;\n\tcolor: <<colour background>>;\n\tborder-radius: 2em;\n}\n\nhtml body.tc-body .tc-btn-rounded svg {\n\tfont-size: 1.6666em;\n\tfill: <<colour background>>;\n}\n\n.tc-btn-rounded:hover {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground: <<colour background>>;\n\tcolor: <<colour muted-foreground>>;\n}\n\nhtml body.tc-body .tc-btn-rounded:hover svg {\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-btn-icon svg {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-btn-text {\n\tpadding: 0;\n\tmargin: 0;\n}\n\n.tc-btn-big-green {\n\tdisplay: inline-block;\n\tpadding: 8px;\n\tmargin: 4px 8px 4px 8px;\n\tbackground: <<colour download-background>>;\n\tcolor: <<colour 
download-foreground>>;\n\tfill: <<colour download-foreground>>;\n\tborder: none;\n\tfont-size: 1.2em;\n\tline-height: 1.4em;\n\ttext-decoration: none;\n}\n\n.tc-btn-big-green svg,\n.tc-btn-big-green img {\n\theight: 2em;\n\twidth: 2em;\n\tvertical-align: middle;\n\tfill: <<colour download-foreground>>;\n}\n\n.tc-sidebar-lists input {\n\tcolor: <<colour foreground>>;\n}\n\n.tc-sidebar-lists button {\n\tcolor: <<colour sidebar-button-foreground>>;\n\tfill: <<colour sidebar-button-foreground>>;\n}\n\n.tc-sidebar-lists button.tc-btn-mini {\n\tcolor: <<colour sidebar-muted-foreground>>;\n}\n\n.tc-sidebar-lists button.tc-btn-mini:hover {\n\tcolor: <<colour sidebar-muted-foreground-hover>>;\n}\n\nbutton svg.tc-image-button, button .tc-image-button img {\n\theight: 1em;\n\twidth: 1em;\n}\n\n.tc-unfold-banner {\n\tposition: absolute;\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n\twidth: 100%;\n\twidth: calc(100% + 2px);\n\tmargin-left: -43px;\n\ttext-align: center;\n\tborder-top: 2px solid <<colour tiddler-info-background>>;\n\tmargin-top: 4px;\n}\n\n.tc-unfold-banner:hover {\n\tbackground: <<colour tiddler-info-background>>;\n\tborder-top: 2px solid <<colour tiddler-info-border>>;\n}\n\n.tc-unfold-banner svg, .tc-fold-banner svg {\n\theight: 0.75em;\n\tfill: <<colour tiddler-controls-foreground>>;\n}\n\n.tc-unfold-banner:hover svg, .tc-fold-banner:hover svg {\n\tfill: <<colour tiddler-controls-foreground-hover>>;\n}\n\n.tc-fold-banner {\n\tposition: absolute;\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n\twidth: 23px;\n\ttext-align: center;\n\tmargin-left: -35px;\n\ttop: 6px;\n\tbottom: 6px;\n}\n\n.tc-fold-banner:hover {\n\tbackground: <<colour tiddler-info-background>>;\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-unfold-banner {\n\t\tposition: static;\n\t\twidth: calc(100% + 59px);\n\t}\n\n\t.tc-fold-banner {\n\t\twidth: 16px;\n\t\tmargin-left: -16px;\n\t\tfont-size: 0.75em;\n\t}\n\n}\n\n/*\n** Tags and missing tiddlers\n*/\n\n.tc-tag-list-item {\n\tposition: relative;\n\tdisplay: inline-block;\n\tmargin-right: 7px;\n}\n\n.tc-tags-wrapper {\n\tmargin: 4px 0 14px 0;\n}\n\n.tc-missing-tiddler-label {\n\tfont-style: italic;\n\tfont-weight: normal;\n\tdisplay: inline-block;\n\tfont-size: 11.844px;\n\tline-height: 14px;\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n}\n\nbutton.tc-tag-label, span.tc-tag-label {\n\tdisplay: inline-block;\n\tpadding: 0.16em 0.7em;\n\tfont-size: 0.9em;\n\tfont-weight: 300;\n\tline-height: 1.2em;\n\tcolor: <<colour tag-foreground>>;\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n\tbackground-color: <<colour tag-background>>;\n\tborder-radius: 1em;\n}\n\n.tc-untagged-separator {\n\twidth: 10em;\n\tleft: 0;\n\tmargin-left: 0;\n\tborder: 0;\n\theight: 1px;\n\tbackground: <<colour tab-divider>>;\n}\n\nbutton.tc-untagged-label {\n\tbackground-color: <<colour untagged-background>>;\n}\n\n.tc-tag-label svg, .tc-tag-label img {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour tag-foreground>>;\n}\n\n.tc-tag-manager-table .tc-tag-label {\n\twhite-space: normal;\n}\n\n.tc-tag-manager-tag {\n\twidth: 100%;\n}\n\n/*\n** Page layout\n*/\n\n.tc-topbar {\n\tposition: fixed;\n\tz-index: 1200;\n}\n\n.tc-topbar-left {\n\tleft: 29px;\n\ttop: 5px;\n}\n\n.tc-topbar-right {\n\ttop: 5px;\n\tright: 29px;\n}\n\n.tc-topbar button {\n\tpadding: 8px;\n}\n\n.tc-topbar svg {\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-topbar button:hover svg {\n\tfill: <<colour 
foreground>>;\n}\n\n.tc-sidebar-header {\n\tcolor: <<colour sidebar-foreground>>;\n\tfill: <<colour sidebar-foreground>>;\n}\n\n.tc-sidebar-header .tc-title a.tc-tiddlylink-resolves {\n\tfont-weight: 300;\n}\n\n.tc-sidebar-header .tc-sidebar-lists p {\n\tmargin-top: 3px;\n\tmargin-bottom: 3px;\n}\n\n.tc-sidebar-header .tc-missing-tiddler-label {\n\tcolor: <<colour sidebar-foreground>>;\n}\n\n.tc-advanced-search input {\n\twidth: 60%;\n}\n\n.tc-search a svg {\n\twidth: 1.2em;\n\theight: 1.2em;\n\tvertical-align: middle;\n}\n\n.tc-page-controls {\n\tmargin-top: 14px;\n\tfont-size: 1.5em;\n}\n\n.tc-page-controls button {\n\tmargin-right: 0.5em;\n}\n\n.tc-page-controls a.tc-tiddlylink:hover {\n\ttext-decoration: none;\n}\n\n.tc-page-controls img {\n\twidth: 1em;\n}\n\n.tc-page-controls svg {\n\tfill: <<colour sidebar-controls-foreground>>;\n}\n\n.tc-page-controls button:hover svg, .tc-page-controls a:hover svg {\n\tfill: <<colour sidebar-controls-foreground-hover>>;\n}\n\n.tc-menu-list-item {\n\twhite-space: nowrap;\n}\n\n.tc-menu-list-count {\n\tfont-weight: bold;\n}\n\n.tc-menu-list-subitem {\n\tpadding-left: 7px;\n}\n\n.tc-story-river {\n\tposition: relative;\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-sidebar-header {\n\t\tpadding: 14px;\n\t\tmin-height: 32px;\n\t\tmargin-top: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t}\n\n\t.tc-story-river {\n\t\tposition: relative;\n\t\tpadding: 0;\n\t}\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-message-box {\n\t\tmargin: 21px -21px 21px -21px;\n\t}\n\n\t.tc-sidebar-scrollable {\n\t\tposition: fixed;\n\t\ttop: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t\tleft: {{$:/themes/tiddlywiki/vanilla/metrics/storyright}};\n\t\tbottom: 0;\n\t\tright: 0;\n\t\toverflow-y: auto;\n\t\toverflow-x: auto;\n\t\t-webkit-overflow-scrolling: touch;\n\t\tmargin: 0 0 0 -42px;\n\t\tpadding: 71px 0 28px 42px;\n\t}\n\n\t.tc-story-river {\n\t\tposition: relative;\n\t\tleft: {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}};\n\t\ttop: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/storywidth}};\n\t\tpadding: 42px 42px 42px 42px;\n\t}\n\n<<if-no-sidebar \"\n\n\t.tc-story-river {\n\t\twidth: calc(100% - {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}});\n\t}\n\n\">>\n\n}\n\n@media print {\n\n\tbody.tc-body {\n\t\tbackground-color: transparent;\n\t}\n\n\t.tc-sidebar-header, .tc-topbar {\n\t\tdisplay: none;\n\t}\n\n\t.tc-story-river {\n\t\tmargin: 0;\n\t\tpadding: 0;\n\t}\n\n\t.tc-story-river .tc-tiddler-frame {\n\t\tmargin: 0;\n\t\tborder: none;\n\t\tpadding: 0;\n\t}\n}\n\n/*\n** Tiddler styles\n*/\n\n.tc-tiddler-frame {\n\tposition: relative;\n\tmargin-bottom: 28px;\n\tbackground-color: <<colour tiddler-background>>;\n\tborder: 1px solid <<colour tiddler-border>>;\n}\n\n{{$:/themes/tiddlywiki/vanilla/sticky}}\n\n.tc-tiddler-info {\n\tpadding: 14px 42px 14px 42px;\n\tbackground-color: <<colour tiddler-info-background>>;\n\tborder-top: 1px solid <<colour tiddler-info-border>>;\n\tborder-bottom: 1px solid <<colour tiddler-info-border>>;\n}\n\n.tc-tiddler-info p {\n\tmargin-top: 3px;\n\tmargin-bottom: 3px;\n}\n\n.tc-tiddler-info .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour tiddler-info-tab-background>>;\n\tborder-bottom: 1px solid <<colour tiddler-info-tab-background>>;\n}\n\n.tc-view-field-table {\n\twidth: 100%;\n}\n\n.tc-view-field-name {\n\twidth: 1%; /* Makes this column be 
as narrow as possible */\n\ttext-align: right;\n\tfont-style: italic;\n\tfont-weight: 200;\n}\n\n.tc-view-field-value {\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\tpadding: 14px 14px 14px 14px;\n\t}\n\n\t.tc-tiddler-info {\n\t\tmargin: 0 -14px 0 -14px;\n\t}\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\tpadding: 28px 42px 42px 42px;\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth}};\n\t\tborder-radius: 2px;\n\t}\n\n<<if-no-sidebar \"\n\n\t.tc-tiddler-frame {\n\t\twidth: 100%;\n\t}\n\n\">>\n\n\t.tc-tiddler-info {\n\t\tmargin: 0 -42px 0 -42px;\n\t}\n}\n\n.tc-site-title,\n.tc-titlebar {\n\tfont-weight: 300;\n\tfont-size: 2.35em;\n\tline-height: 1.2em;\n\tcolor: <<colour tiddler-title-foreground>>;\n\tmargin: 0;\n}\n\n.tc-site-title {\n\tcolor: <<colour site-title-foreground>>;\n}\n\n.tc-tiddler-title-icon {\n\tvertical-align: middle;\n}\n\n.tc-system-title-prefix {\n\tcolor: <<colour muted-foreground>>;\n}\n\n.tc-titlebar h2 {\n\tfont-size: 1em;\n\tdisplay: inline;\n}\n\n.tc-titlebar img {\n\theight: 1em;\n}\n\n.tc-subtitle {\n\tfont-size: 0.9em;\n\tcolor: <<colour tiddler-subtitle-foreground>>;\n\tfont-weight: 300;\n}\n\n.tc-tiddler-missing .tc-title {\n  font-style: italic;\n  font-weight: normal;\n}\n\n.tc-tiddler-frame .tc-tiddler-controls {\n\tfloat: right;\n}\n\n.tc-tiddler-controls .tc-drop-down {\n\tfont-size: 0.6em;\n}\n\n.tc-tiddler-controls .tc-drop-down .tc-drop-down {\n\tfont-size: 1em;\n}\n\n.tc-tiddler-controls > span > button {\n\tvertical-align: baseline;\n\tmargin-left:5px;\n}\n\n.tc-tiddler-controls button svg, .tc-tiddler-controls button img,\n.tc-search button svg, .tc-search a svg {\n\theight: 0.75em;\n\tfill: <<colour tiddler-controls-foreground>>;\n}\n\n.tc-tiddler-controls button.tc-selected svg,\n.tc-page-controls button.tc-selected svg  {\n\tfill: <<colour tiddler-controls-foreground-selected>>;\n}\n\n.tc-tiddler-controls button.tc-btn-invisible:hover svg,\n.tc-search button:hover svg, .tc-search a:hover svg {\n\tfill: <<colour tiddler-controls-foreground-hover>>;\n}\n\n@media print {\n\t.tc-tiddler-controls {\n\t\tdisplay: none;\n\t}\n}\n\n.tc-tiddler-help { /* Help prompts within tiddler template */\n\tcolor: <<colour muted-foreground>>;\n\tmargin-top: 14px;\n}\n\n.tc-tiddler-help a.tc-tiddlylink {\n\tcolor: <<colour very-muted-foreground>>;\n}\n\n.tc-tiddler-frame .tc-edit-texteditor {\n\twidth: 100%;\n\tmargin: 4px 0 4px 0;\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor,\n.tc-tiddler-frame textarea.tc-edit-texteditor,\n.tc-tiddler-frame iframe.tc-edit-texteditor {\n\tpadding: 3px 3px 3px 3px;\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tline-height: 1.3em;\n\t-webkit-appearance: none;\n}\n\n.tc-tiddler-frame .tc-binary-warning {\n\twidth: 100%;\n\theight: 5em;\n\ttext-align: center;\n\tpadding: 3em 3em 6em 3em;\n\tbackground: <<colour alert-background>>;\n\tborder: 1px solid <<colour alert-border>>;\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor {\n\tbackground-color: <<colour tiddler-editor-background>>;\n}\n\ncanvas.tc-edit-bitmapeditor  {\n\tborder: 6px solid <<colour tiddler-editor-border-image>>;\n\tcursor: crosshair;\n\t-moz-user-select: none;\n\t-webkit-user-select: none;\n\t-ms-user-select: none;\n\tmargin-top: 6px;\n\tmargin-bottom: 6px;\n}\n\n.tc-edit-bitmapeditor-width {\n\tdisplay: block;\n}\n\n.tc-edit-bitmapeditor-height {\n\tdisplay: block;\n}\n\n.tc-tiddler-body {\n\tclear: 
both;\n}\n\n.tc-tiddler-frame .tc-tiddler-body {\n\tfont-size: {{$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize}};\n\tline-height: {{$:/themes/tiddlywiki/vanilla/metrics/bodylineheight}};\n}\n\n.tc-titlebar, .tc-tiddler-edit-title {\n\toverflow: hidden; /* https://github.com/Jermolene/TiddlyWiki5/issues/282 */\n}\n\nhtml body.tc-body.tc-single-tiddler-window {\n\tmargin: 1em;\n\tbackground: <<colour tiddler-background>>;\n}\n\n.tc-single-tiddler-window img,\n.tc-single-tiddler-window svg,\n.tc-single-tiddler-window canvas,\n.tc-single-tiddler-window embed,\n.tc-single-tiddler-window iframe {\n\tmax-width: 100%;\n}\n\n/*\n** Editor\n*/\n\n.tc-editor-toolbar {\n\tmargin-top: 8px;\n}\n\n.tc-editor-toolbar button {\n\tvertical-align: middle;\n\tbackground-color: <<colour tiddler-controls-foreground>>;\n\tfill: <<colour tiddler-controls-foreground-selected>>;\n\tborder-radius: 4px;\n\tpadding: 3px;\n\tmargin: 2px 0 2px 4px;\n}\n\n.tc-editor-toolbar button.tc-text-editor-toolbar-item-adjunct {\n\tmargin-left: 1px;\n\twidth: 1em;\n\tborder-radius: 8px;\n}\n\n.tc-editor-toolbar button.tc-text-editor-toolbar-item-start-group {\n\tmargin-left: 11px;\n}\n\n.tc-editor-toolbar button.tc-selected {\n\tbackground-color: <<colour primary>>;\n}\n\n.tc-editor-toolbar button svg {\n\twidth: 1.6em;\n\theight: 1.2em;\n}\n\n.tc-editor-toolbar button:hover {\n\tbackground-color: <<colour tiddler-controls-foreground-selected>>;\n\tfill: <<colour background>>;\n}\n\n.tc-editor-toolbar .tc-text-editor-toolbar-more {\n\twhite-space: normal;\n}\n\n.tc-editor-toolbar .tc-text-editor-toolbar-more button {\n\tdisplay: inline-block;\n\tpadding: 3px;\n\twidth: auto;\n}\n\n.tc-editor-toolbar .tc-search-results {\n\tpadding: 0;\n}\n\n/*\n** Adjustments for fluid-fixed mode\n*/\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n<<if-fluid-fixed text:\"\"\"\n\n\t.tc-story-river {\n\t\tpadding-right: 0;\n\t\tposition: relative;\n\t\twidth: auto;\n\t\tleft: 0;\n\t\tmargin-left: {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}};\n\t\tmargin-right: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth}};\n\t}\n\n\t.tc-tiddler-frame {\n\t\twidth: 100%;\n\t}\n\n\t.tc-sidebar-scrollable {\n\t\tleft: auto;\n\t\tbottom: 0;\n\t\tright: 0;\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth}};\n\t}\n\n\tbody.tc-body .tc-storyview-zoomin-tiddler {\n\t\twidth: 100%;\n\t\twidth: calc(100% - 42px);\n\t}\n\n\"\"\" hiddenSidebarText:\"\"\"\n\n\t.tc-story-river {\n\t\tpadding-right: 3em;\n\t\tmargin-right: 0;\n\t}\n\n\tbody.tc-body .tc-storyview-zoomin-tiddler {\n\t\twidth: 100%;\n\t\twidth: calc(100% - 84px);\n\t}\n\n\"\"\">>\n\n}\n\n/*\n** Toolbar buttons\n*/\n\n.tc-page-controls svg.tc-image-new-button {\n  fill: <<colour toolbar-new-button>>;\n}\n\n.tc-page-controls svg.tc-image-options-button {\n  fill: <<colour toolbar-options-button>>;\n}\n\n.tc-page-controls svg.tc-image-save-button {\n  fill: <<colour toolbar-save-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-info-button {\n  fill: <<colour toolbar-info-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-edit-button {\n  fill: <<colour toolbar-edit-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-close-button {\n  fill: <<colour toolbar-close-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-delete-button {\n  fill: <<colour toolbar-delete-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-cancel-button {\n  fill: <<colour toolbar-cancel-button>>;\n}\n\n.tc-tiddler-controls button 
svg.tc-image-done-button {\n  fill: <<colour toolbar-done-button>>;\n}\n\n/*\n** Tiddler edit mode\n*/\n\n.tc-tiddler-edit-frame em.tc-edit {\n\tcolor: <<colour muted-foreground>>;\n\tfont-style: normal;\n}\n\n.tc-edit-type-dropdown a.tc-tiddlylink-missing {\n\tfont-style: normal;\n}\n\n.tc-edit-tags {\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tpadding: 4px 8px 4px 8px;\n}\n\n.tc-edit-add-tag {\n\tdisplay: inline-block;\n}\n\n.tc-edit-add-tag .tc-add-tag-name input {\n\twidth: 50%;\n}\n\n.tc-edit-tags .tc-tag-label {\n\tdisplay: inline-block;\n}\n\n.tc-edit-tags-list {\n\tmargin: 14px 0 14px 0;\n}\n\n.tc-remove-tag-button {\n\tpadding-left: 4px;\n}\n\n.tc-tiddler-preview {\n\toverflow: auto;\n}\n\n.tc-tiddler-preview-preview {\n\tfloat: right;\n\twidth: 49%;\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tmargin: 4px 3px 3px 3px;\n\tpadding: 3px 3px 3px 3px;\n}\n\n.tc-tiddler-frame .tc-tiddler-preview .tc-edit-texteditor {\n\twidth: 49%;\n}\n\n.tc-tiddler-frame .tc-tiddler-preview canvas.tc-edit-bitmapeditor {\n\tmax-width: 49%;\n}\n\n.tc-edit-fields {\n\twidth: 100%;\n}\n\n\n.tc-edit-fields table, .tc-edit-fields tr, .tc-edit-fields td {\n\tborder: none;\n\tpadding: 4px;\n}\n\n.tc-edit-fields > tbody > .tc-edit-field:nth-child(odd) {\n\tbackground-color: <<colour tiddler-editor-fields-odd>>;\n}\n\n.tc-edit-fields > tbody > .tc-edit-field:nth-child(even) {\n\tbackground-color: <<colour tiddler-editor-fields-even>>;\n}\n\n.tc-edit-field-name {\n\ttext-align: right;\n}\n\n.tc-edit-field-value input {\n\twidth: 100%;\n}\n\n.tc-edit-field-remove {\n}\n\n.tc-edit-field-remove svg {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n\tvertical-align: middle;\n}\n\n.tc-edit-field-add-name {\n\tdisplay: inline-block;\n\twidth: 15%;\n}\n\n.tc-edit-field-add-value {\n\tdisplay: inline-block;\n\twidth: 40%;\n}\n\n.tc-edit-field-add-button {\n\tdisplay: inline-block;\n\twidth: 10%;\n}\n\n/*\n** Storyview Classes\n*/\n\n.tc-storyview-zoomin-tiddler {\n\tposition: absolute;\n\tdisplay: block;\n\twidth: 100%;\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-storyview-zoomin-tiddler {\n\t\twidth: calc(100% - 84px);\n\t}\n\n}\n\n/*\n** Dropdowns\n*/\n\n.tc-btn-dropdown {\n\ttext-align: left;\n}\n\n.tc-btn-dropdown svg, .tc-btn-dropdown img {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-drop-down-wrapper {\n\tposition: relative;\n}\n\n.tc-drop-down {\n\tmin-width: 380px;\n\tborder: 1px solid <<colour dropdown-border>>;\n\tbackground-color: <<colour dropdown-background>>;\n\tpadding: 7px 0 7px 0;\n\tmargin: 4px 0 0 0;\n\twhite-space: nowrap;\n\ttext-shadow: none;\n\tline-height: 1.4;\n}\n\n.tc-drop-down .tc-drop-down {\n\tmargin-left: 14px;\n}\n\n.tc-drop-down button svg, .tc-drop-down a svg  {\n\tfill: <<colour foreground>>;\n}\n\n.tc-drop-down button.tc-btn-invisible:hover svg {\n\tfill: <<colour foreground>>;\n}\n\n.tc-drop-down p {\n\tpadding: 0 14px 0 14px;\n}\n\n.tc-drop-down svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-drop-down img {\n\twidth: 1em;\n}\n\n.tc-drop-down-language-chooser img {\n\twidth: 2em;\n\tvertical-align: baseline;\n}\n\n.tc-drop-down a, .tc-drop-down button {\n\tdisplay: block;\n\tpadding: 0 14px 0 14px;\n\twidth: 100%;\n\ttext-align: left;\n\tcolor: <<colour foreground>>;\n\tline-height: 1.4;\n}\n\n.tc-drop-down .tc-tab-set .tc-tab-buttons button {\n\tdisplay: inline-block;\n    width: auto;\n    margin-bottom: 0px;\n    border-bottom-left-radius: 0;\n    
border-bottom-right-radius: 0;\n}\n\n.tc-drop-down .tc-prompt {\n\tpadding: 0 14px;\n}\n\n.tc-drop-down .tc-chooser {\n\tborder: none;\n}\n\n.tc-drop-down .tc-chooser .tc-swatches-horiz {\n\tfont-size: 0.4em;\n\tpadding-left: 1.2em;\n}\n\n.tc-drop-down .tc-file-input-wrapper {\n\twidth: 100%;\n}\n\n.tc-drop-down .tc-file-input-wrapper button {\n\tcolor: <<colour foreground>>;\n}\n\n.tc-drop-down a:hover, .tc-drop-down button:hover, .tc-drop-down .tc-file-input-wrapper:hover button {\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n\ttext-decoration: none;\n}\n\n.tc-drop-down .tc-tab-buttons button {\n\tbackground-color: <<colour dropdown-tab-background>>;\n}\n\n.tc-drop-down .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour dropdown-tab-background-selected>>;\n\tborder-bottom: 1px solid <<colour dropdown-tab-background-selected>>;\n}\n\n.tc-drop-down-bullet {\n\tdisplay: inline-block;\n\twidth: 0.5em;\n}\n\n.tc-drop-down .tc-tab-contents a {\n\tpadding: 0 0.5em 0 0.5em;\n}\n\n.tc-block-dropdown-wrapper {\n\tposition: relative;\n}\n\n.tc-block-dropdown {\n\tposition: absolute;\n\tmin-width: 220px;\n\tborder: 1px solid <<colour dropdown-border>>;\n\tbackground-color: <<colour dropdown-background>>;\n\tpadding: 7px 0;\n\tmargin: 4px 0 0 0;\n\twhite-space: nowrap;\n\tz-index: 1000;\n\ttext-shadow: none;\n}\n\n.tc-block-dropdown.tc-search-drop-down {\n\tmargin-left: -12px;\n}\n\n.tc-block-dropdown a {\n\tdisplay: block;\n\tpadding: 4px 14px 4px 14px;\n}\n\n.tc-block-dropdown.tc-search-drop-down a {\n\tdisplay: block;\n\tpadding: 0px 10px 0px 10px;\n}\n\n.tc-drop-down .tc-dropdown-item-plain,\n.tc-block-dropdown .tc-dropdown-item-plain {\n\tpadding: 4px 14px 4px 7px;\n}\n\n.tc-drop-down .tc-dropdown-item,\n.tc-block-dropdown .tc-dropdown-item {\n\tpadding: 4px 14px 4px 7px;\n\tcolor: <<colour muted-foreground>>;\n}\n\n.tc-block-dropdown a:hover {\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n\ttext-decoration: none;\n}\n\n.tc-search-results {\n\tpadding: 0 7px 0 7px;\n}\n\n.tc-image-chooser, .tc-colour-chooser {\n\twhite-space: normal;\n}\n\n.tc-image-chooser a,\n.tc-colour-chooser a {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\ttext-align: center;\n\tposition: relative;\n}\n\n.tc-image-chooser a {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tpadding: 2px;\n\tmargin: 2px;\n\twidth: 4em;\n\theight: 4em;\n}\n\n.tc-colour-chooser a {\n\tpadding: 3px;\n\twidth: 2em;\n\theight: 2em;\n\tvertical-align: middle;\n}\n\n.tc-image-chooser a:hover,\n.tc-colour-chooser a:hover {\n\tbackground: <<colour primary>>;\n\tpadding: 0px;\n\tborder: 3px solid <<colour primary>>;\n}\n\n.tc-image-chooser a svg,\n.tc-image-chooser a img {\n\tdisplay: inline-block;\n\twidth: auto;\n\theight: auto;\n\tmax-width: 3.5em;\n\tmax-height: 3.5em;\n\tposition: absolute;\n\ttop: 0;\n\tbottom: 0;\n\tleft: 0;\n\tright: 0;\n\tmargin: auto;\n}\n\n/*\n** Modals\n*/\n\n.tc-modal-wrapper {\n\tposition: fixed;\n\toverflow: auto;\n\toverflow-y: scroll;\n\ttop: 0;\n\tright: 0;\n\tbottom: 0;\n\tleft: 0;\n\tz-index: 900;\n}\n\n.tc-modal-backdrop {\n\tposition: fixed;\n\ttop: 0;\n\tright: 0;\n\tbottom: 0;\n\tleft: 0;\n\tz-index: 1000;\n\tbackground-color: <<colour modal-backdrop>>;\n}\n\n.tc-modal {\n\tz-index: 1100;\n\tbackground-color: <<colour modal-background>>;\n\tborder: 1px solid <<colour modal-border>>;\n}\n\n@media (max-width: 55em) {\n\t.tc-modal {\n\t\tposition: fixed;\n\t\ttop: 
1em;\n\t\tleft: 1em;\n\t\tright: 1em;\n\t}\n\n\t.tc-modal-body {\n\t\toverflow-y: auto;\n\t\tmax-height: 400px;\n\t\tmax-height: 60vh;\n\t}\n}\n\n@media (min-width: 55em) {\n\t.tc-modal {\n\t\tposition: fixed;\n\t\ttop: 2em;\n\t\tleft: 25%;\n\t\twidth: 50%;\n\t}\n\n\t.tc-modal-body {\n\t\toverflow-y: auto;\n\t\tmax-height: 400px;\n\t\tmax-height: 60vh;\n\t}\n}\n\n.tc-modal-header {\n\tpadding: 9px 15px;\n\tborder-bottom: 1px solid <<colour modal-header-border>>;\n}\n\n.tc-modal-header h3 {\n\tmargin: 0;\n\tline-height: 30px;\n}\n\n.tc-modal-header img, .tc-modal-header svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-modal-body {\n\tpadding: 15px;\n}\n\n.tc-modal-footer {\n\tpadding: 14px 15px 15px;\n\tmargin-bottom: 0;\n\ttext-align: right;\n\tbackground-color: <<colour modal-footer-background>>;\n\tborder-top: 1px solid <<colour modal-footer-border>>;\n}\n\n/*\n** Notifications\n*/\n\n.tc-notification {\n\tposition: fixed;\n\ttop: 14px;\n\tright: 42px;\n\tz-index: 1300;\n\tmax-width: 280px;\n\tpadding: 0 14px 0 14px;\n\tbackground-color: <<colour notification-background>>;\n\tborder: 1px solid <<colour notification-border>>;\n}\n\n/*\n** Tabs\n*/\n\n.tc-tab-set.tc-vertical {\n\tdisplay: -webkit-flex;\n\tdisplay: flex;\n}\n\n.tc-tab-buttons {\n\tfont-size: 0.85em;\n\tpadding-top: 1em;\n\tmargin-bottom: -2px;\n}\n\n.tc-tab-buttons.tc-vertical  {\n\tz-index: 100;\n\tdisplay: block;\n\tpadding-top: 14px;\n\tvertical-align: top;\n\ttext-align: right;\n\tmargin-bottom: inherit;\n\tmargin-right: -1px;\n\tmax-width: 33%;\n\t-webkit-flex: 0 0 auto;\n\tflex: 0 0 auto;\n}\n\n.tc-tab-buttons button.tc-tab-selected {\n\tcolor: <<colour tab-foreground-selected>>;\n\tbackground-color: <<colour tab-background-selected>>;\n\tborder-left: 1px solid <<colour tab-border-selected>>;\n\tborder-top: 1px solid <<colour tab-border-selected>>;\n\tborder-right: 1px solid <<colour tab-border-selected>>;\n}\n\n.tc-tab-buttons button {\n\tcolor: <<colour tab-foreground>>;\n\tpadding: 3px 5px 3px 5px;\n\tmargin-right: 0.3em;\n\tfont-weight: 300;\n\tborder: none;\n\tbackground: inherit;\n\tbackground-color: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-right: 1px solid <<colour tab-border>>;\n\tborder-top-left-radius: 2px;\n\tborder-top-right-radius: 2px;\n}\n\n.tc-tab-buttons.tc-vertical button {\n\tdisplay: block;\n\twidth: 100%;\n\tmargin-top: 3px;\n\tmargin-right: 0;\n\ttext-align: right;\n\tbackground-color: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tborder-right: none;\n\tborder-top-left-radius: 2px;\n\tborder-bottom-left-radius: 2px;\n}\n\n.tc-tab-buttons.tc-vertical button.tc-tab-selected {\n\tbackground-color: <<colour tab-background-selected>>;\n\tborder-right: 1px solid <<colour tab-background-selected>>;\n}\n\n.tc-tab-divider {\n\tborder-top: 1px solid <<colour tab-divider>>;\n}\n\n.tc-tab-divider.tc-vertical  {\n\tdisplay: none;\n}\n\n.tc-tab-content {\n\tmargin-top: 14px;\n}\n\n.tc-tab-content.tc-vertical  {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\tpadding-top: 0;\n\tpadding-left: 14px;\n\tborder-left: 1px solid <<colour tab-border>>;\n\t-webkit-flex: 1 0 70%;\n\tflex: 1 0 70%;\n}\n\n.tc-sidebar-lists .tc-tab-buttons {\n\tmargin-bottom: -1px;\n}\n\n.tc-sidebar-lists .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour sidebar-tab-background-selected>>;\n\tcolor: <<colour 
sidebar-tab-foreground-selected>>;\n\tborder-left: 1px solid <<colour sidebar-tab-border-selected>>;\n\tborder-top: 1px solid <<colour sidebar-tab-border-selected>>;\n\tborder-right: 1px solid <<colour sidebar-tab-border-selected>>;\n}\n\n.tc-sidebar-lists .tc-tab-buttons button {\n\tbackground-color: <<colour sidebar-tab-background>>;\n\tcolor: <<colour sidebar-tab-foreground>>;\n\tborder-left: 1px solid <<colour sidebar-tab-border>>;\n\tborder-top: 1px solid <<colour sidebar-tab-border>>;\n\tborder-right: 1px solid <<colour sidebar-tab-border>>;\n}\n\n.tc-sidebar-lists .tc-tab-divider {\n\tborder-top: 1px solid <<colour sidebar-tab-divider>>;\n}\n\n.tc-more-sidebar .tc-tab-buttons button {\n\tdisplay: block;\n\twidth: 100%;\n\tbackground-color: <<colour sidebar-tab-background>>;\n\tborder-top: none;\n\tborder-left: none;\n\tborder-bottom: none;\n\tborder-right: 1px solid #ccc;\n\tmargin-bottom: inherit;\n}\n\n.tc-more-sidebar .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour sidebar-tab-background-selected>>;\n\tborder: none;\n}\n\n/*\n** Alerts\n*/\n\n.tc-alerts {\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tmax-width: 500px;\n\tz-index: 20000;\n}\n\n.tc-alert {\n\tposition: relative;\n\tmargin: 28px;\n\tpadding: 14px 14px 14px 14px;\n\tborder: 2px solid <<colour alert-border>>;\n\tbackground-color: <<colour alert-background>>;\n}\n\n.tc-alert-toolbar {\n\tposition: absolute;\n\ttop: 14px;\n\tright: 14px;\n}\n\n.tc-alert-toolbar svg {\n\tfill: <<colour alert-muted-foreground>>;\n}\n\n.tc-alert-subtitle {\n\tcolor: <<colour alert-muted-foreground>>;\n\tfont-weight: bold;\n}\n\n.tc-alert-highlight {\n\tcolor: <<colour alert-highlight>>;\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-static-alert {\n\t\tposition: relative;\n\t}\n\n\t.tc-static-alert-inner {\n\t\tposition: absolute;\n\t\tz-index: 100;\n\t}\n\n}\n\n.tc-static-alert-inner {\n\tpadding: 0 2px 2px 42px;\n\tcolor: <<colour static-alert-foreground>>;\n}\n\n/*\n** Control panel\n*/\n\n.tc-control-panel td {\n\tpadding: 4px;\n}\n\n.tc-control-panel table, .tc-control-panel table input, .tc-control-panel table textarea {\n\twidth: 100%;\n}\n\n.tc-plugin-info {\n\tdisplay: block;\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground-colour: <<colour background>>;\n\tmargin: 0.5em 0 0.5em 0;\n\tpadding: 4px;\n}\n\n.tc-plugin-info-disabled {\n\tbackground: -webkit-repeating-linear-gradient(45deg, #ff0, #ff0 10px, #eee 10px, #eee 20px);\n\tbackground: repeating-linear-gradient(45deg, #ff0, #ff0 10px, #eee 10px, #eee 20px);\n}\n\n.tc-plugin-info-disabled:hover {\n\tbackground: -webkit-repeating-linear-gradient(45deg, #aa0, #aa0 10px, #888 10px, #888 20px);\n\tbackground: repeating-linear-gradient(45deg, #aa0, #aa0 10px, #888 10px, #888 20px);\n}\n\na.tc-tiddlylink.tc-plugin-info:hover {\n\ttext-decoration: none;\n\tbackground-color: <<colour primary>>;\n\tcolor: <<colour background>>;\n\tfill: <<colour foreground>>;\n}\n\na.tc-tiddlylink.tc-plugin-info:hover .tc-plugin-info > .tc-plugin-info-chunk > svg {\n\tfill: <<colour foreground>>;\n}\n\n.tc-plugin-info-chunk {\n\tdisplay: inline-block;\n\tvertical-align: middle;\n}\n\n.tc-plugin-info-chunk h1 {\n\tfont-size: 1em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info-chunk h2 {\n\tfont-size: 0.8em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info-chunk div {\n\tfont-size: 0.7em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info:hover > .tc-plugin-info-chunk > img, .tc-plugin-info:hover > .tc-plugin-info-chunk > svg 
{\n\twidth: 2em;\n\theight: 2em;\n\tfill: <<colour foreground>>;\n}\n\n.tc-plugin-info > .tc-plugin-info-chunk > img, .tc-plugin-info > .tc-plugin-info-chunk > svg {\n\twidth: 2em;\n\theight: 2em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-plugin-info.tc-small-icon > .tc-plugin-info-chunk > img, .tc-plugin-info.tc-small-icon > .tc-plugin-info-chunk > svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-plugin-info-dropdown {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tmargin-top: -8px;\n}\n\n.tc-plugin-info-dropdown-message {\n\tbackground: <<colour message-background>>;\n\tpadding: 0.5em 1em 0.5em 1em;\n\tfont-weight: bold;\n\tfont-size: 0.8em;\n}\n\n.tc-plugin-info-dropdown-body {\n\tpadding: 1em 1em 1em 1em;\n}\n\n/*\n** Message boxes\n*/\n\n.tc-message-box {\n\tborder: 1px solid <<colour message-border>>;\n\tbackground: <<colour message-background>>;\n\tpadding: 0px 21px 0px 21px;\n\tfont-size: 12px;\n\tline-height: 18px;\n\tcolor: <<colour message-foreground>>;\n}\n\n/*\n** Pictures\n*/\n\n.tc-bordered-image {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tpadding: 5px;\n\tmargin: 5px;\n}\n\n/*\n** Floats\n*/\n\n.tc-float-right {\n\tfloat: right;\n}\n\n/*\n** Chooser\n*/\n\n.tc-chooser {\n\tborder: 1px solid <<colour table-border>>;\n}\n\n.tc-chooser-item {\n\tborder: 8px;\n\tpadding: 2px 4px;\n}\n\n.tc-chooser-item a.tc-tiddlylink {\n\tdisplay: block;\n\ttext-decoration: none;\n\tcolor: <<colour tiddler-link-foreground>>;\n\tbackground-color: <<colour tiddler-link-background>>;\n}\n\n.tc-chooser-item a.tc-tiddlylink:hover {\n\ttext-decoration: none;\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n}\n\n/*\n** Palette swatches\n*/\n\n.tc-swatches-horiz {\n}\n\n.tc-swatches-horiz .tc-swatch {\n\tdisplay: inline-block;\n}\n\n.tc-swatch {\n\twidth: 2em;\n\theight: 2em;\n\tmargin: 0.4em;\n\tborder: 1px solid #888;\n}\n\n/*\n** Table of contents\n*/\n\n.tc-sidebar-lists .tc-table-of-contents {\n\twhite-space: nowrap;\n}\n\n.tc-table-of-contents button {\n\tcolor: <<colour sidebar-foreground>>;\n}\n\n.tc-table-of-contents svg {\n\twidth: 0.7em;\n\theight: 0.7em;\n\tvertical-align: middle;\n\tfill: <<colour sidebar-foreground>>;\n}\n\n.tc-table-of-contents ol {\n\tlist-style-type: none;\n\tpadding-left: 0;\n}\n\n.tc-table-of-contents ol ol {\n\tpadding-left: 1em;\n}\n\n.tc-table-of-contents li {\n\tfont-size: 1.0em;\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li a {\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li li {\n\tfont-size: 0.95em;\n\tfont-weight: normal;\n\tline-height: 1.4;\n}\n\n.tc-table-of-contents li li a {\n\tfont-weight: normal;\n}\n\n.tc-table-of-contents li li li {\n\tfont-size: 0.95em;\n\tfont-weight: 200;\n\tline-height: 1.5;\n}\n\n.tc-table-of-contents li li li a {\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li li li li {\n\tfont-size: 0.95em;\n\tfont-weight: 200;\n}\n\n.tc-tabbed-table-of-contents {\n\tdisplay: -webkit-flex;\n\tdisplay: flex;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents {\n\tz-index: 100;\n\tdisplay: inline-block;\n\tpadding-left: 1em;\n\tmax-width: 50%;\n\t-webkit-flex: 0 0 auto;\n\tflex: 0 0 auto;\n\tbackground: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a,\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a {\n\tdisplay: 
block;\n\tpadding: 0.12em 1em 0.12em 0.25em;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a {\n\tborder-top: 1px solid <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-background>>;\n\tborder-bottom: 1px solid <<colour tab-background>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a:hover {\n\ttext-decoration: none;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tbackground: <<colour tab-border>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a {\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tbackground: <<colour background>>;\n\tmargin-right: -1px;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a:hover {\n\ttext-decoration: none;\n}\n\n.tc-tabbed-table-of-contents .tc-tabbed-table-of-contents-content {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\tpadding-left: 1.5em;\n\tpadding-right: 1.5em;\n\tborder: 1px solid <<colour tab-border>>;\n\t-webkit-flex: 1 0 50%;\n\tflex: 1 0 50%;\n}\n\n/*\n** Dirty indicator\n*/\n\nbody.tc-dirty span.tc-dirty-indicator, body.tc-dirty span.tc-dirty-indicator svg {\n\tfill: <<colour dirty-indicator>>;\n\tcolor: <<colour dirty-indicator>>;\n}\n\n/*\n** File inputs\n*/\n\n.tc-file-input-wrapper {\n\tposition: relative;\n\toverflow: hidden;\n\tdisplay: inline-block;\n\tvertical-align: middle;\n}\n\n.tc-file-input-wrapper input[type=file] {\n\tposition: absolute;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbottom: 0;\n\tfont-size: 999px;\n\tmax-width: 100%;\n\tmax-height: 100%;\n\tfilter: alpha(opacity=0);\n\topacity: 0;\n\toutline: none;\n\tbackground: white;\n\tcursor: pointer;\n\tdisplay: inline-block;\n}\n\n/*\n** Thumbnail macros\n*/\n\n.tc-thumbnail-wrapper {\n\tposition: relative;\n\tdisplay: inline-block;\n\tmargin: 6px;\n\tvertical-align: top;\n}\n\n.tc-thumbnail-right-wrapper {\n\tfloat:right;\n\tmargin: 0.5em 0 0.5em 0.5em;\n}\n\n.tc-thumbnail-image {\n\ttext-align: center;\n\toverflow: hidden;\n\tborder-radius: 3px;\n}\n\n.tc-thumbnail-image svg,\n.tc-thumbnail-image img {\n\tfilter: alpha(opacity=1);\n\topacity: 1;\n\tmin-width: 100%;\n\tmin-height: 100%;\n\tmax-width: 100%;\n}\n\n.tc-thumbnail-wrapper:hover .tc-thumbnail-image svg,\n.tc-thumbnail-wrapper:hover .tc-thumbnail-image img {\n\tfilter: alpha(opacity=0.8);\n\topacity: 0.8;\n}\n\n.tc-thumbnail-background {\n\tposition: absolute;\n\tborder-radius: 3px;\n}\n\n.tc-thumbnail-icon svg,\n.tc-thumbnail-icon img {\n\twidth: 3em;\n\theight: 3em;\n\t<<filter \"drop-shadow(2px 2px 4px rgba(0,0,0,0.3))\">>\n}\n\n.tc-thumbnail-wrapper:hover .tc-thumbnail-icon svg,\n.tc-thumbnail-wrapper:hover .tc-thumbnail-icon img {\n\tfill: #fff;\n\t<<filter \"drop-shadow(3px 3px 4px rgba(0,0,0,0.6))\">>\n}\n\n.tc-thumbnail-icon {\n\tposition: absolute;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbottom: 0;\n\tdisplay: -webkit-flex;\n\t-webkit-align-items: center;\n\t-webkit-justify-content: center;\n\tdisplay: flex;\n\talign-items: center;\n\tjustify-content: center;\n}\n\n.tc-thumbnail-caption {\n\tposition: absolute;\n\tbackground-color: #777;\n\tcolor: #fff;\n\ttext-align: center;\n\tbottom: 0;\n\twidth: 100%;\n\tfilter: alpha(opacity=0.9);\n\topacity: 0.9;\n\tline-height: 1.4;\n\tborder-bottom-left-radius: 3px;\n\tborder-bottom-right-radius: 3px;\n}\n\n.tc-thumbnail-wrapper:hover 
.tc-thumbnail-caption {\n\tfilter: alpha(opacity=1);\n\topacity: 1;\n}\n\n/*\n** Errors\n*/\n\n.tc-error {\n\tbackground: #f00;\n\tcolor: #fff;\n}\n"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize",
            "text": "15px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/bodylineheight": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/bodylineheight",
            "text": "22px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/fontsize": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/fontsize",
            "text": "14px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/lineheight": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/lineheight",
            "text": "20px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storyleft": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storyleft",
            "text": "0px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storytop": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storytop",
            "text": "0px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storyright": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storyright",
            "text": "770px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storywidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storywidth",
            "text": "770px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth",
            "text": "686px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint",
            "text": "960px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth",
            "text": "350px"
        },
        "$:/themes/tiddlywiki/vanilla/options/stickytitles": {
            "title": "$:/themes/tiddlywiki/vanilla/options/stickytitles",
            "text": "no"
        },
        "$:/themes/tiddlywiki/vanilla/options/sidebarlayout": {
            "title": "$:/themes/tiddlywiki/vanilla/options/sidebarlayout",
            "text": "fixed-fluid"
        },
        "$:/themes/tiddlywiki/vanilla/options/codewrapping": {
            "title": "$:/themes/tiddlywiki/vanilla/options/codewrapping",
            "text": "pre-wrap"
        },
        "$:/themes/tiddlywiki/vanilla/reset": {
            "title": "$:/themes/tiddlywiki/vanilla/reset",
            "type": "text/plain",
            "text": "/*! normalize.css v3.0.0 | MIT License | git.io/normalize */\n\n/**\n * 1. Set default font family to sans-serif.\n * 2. Prevent iOS text size adjust after orientation change, without disabling\n *    user zoom.\n */\n\nhtml {\n  font-family: sans-serif; /* 1 */\n  -ms-text-size-adjust: 100%; /* 2 */\n  -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/**\n * Remove default margin.\n */\n\nbody {\n  margin: 0;\n}\n\n/* HTML5 display definitions\n   ========================================================================== */\n\n/**\n * Correct `block` display not defined in IE 8/9.\n */\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nnav,\nsection,\nsummary {\n  display: block;\n}\n\n/**\n * 1. Correct `inline-block` display not defined in IE 8/9.\n * 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n */\n\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block; /* 1 */\n  vertical-align: baseline; /* 2 */\n}\n\n/**\n * Prevent modern browsers from displaying `audio` without controls.\n * Remove excess height in iOS 5 devices.\n */\n\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n\n/**\n * Address `[hidden]` styling not present in IE 8/9.\n * Hide the `template` element in IE, Safari, and Firefox < 22.\n */\n\n[hidden],\ntemplate {\n  display: none;\n}\n\n/* Links\n   ========================================================================== */\n\n/**\n * Remove the gray background color from active links in IE 10.\n */\n\na {\n  background: transparent;\n}\n\n/**\n * Improve readability when focused and also mouse hovered in all browsers.\n */\n\na:active,\na:hover {\n  outline: 0;\n}\n\n/* Text-level semantics\n   ========================================================================== */\n\n/**\n * Address styling not present in IE 8/9, Safari 5, and Chrome.\n */\n\nabbr[title] {\n  border-bottom: 1px dotted;\n}\n\n/**\n * Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome.\n */\n\nb,\nstrong {\n  font-weight: bold;\n}\n\n/**\n * Address styling not present in Safari 5 and Chrome.\n */\n\ndfn {\n  font-style: italic;\n}\n\n/**\n * Address variable `h1` font-size and margin within `section` and `article`\n * contexts in Firefox 4+, Safari 5, and Chrome.\n */\n\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\n\n/**\n * Address styling not present in IE 8/9.\n */\n\nmark {\n  background: #ff0;\n  color: #000;\n}\n\n/**\n * Address inconsistent and variable font size in all browsers.\n */\n\nsmall {\n  font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` affecting `line-height` in all browsers.\n */\n\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\n\nsup {\n  top: -0.5em;\n}\n\nsub {\n  bottom: -0.25em;\n}\n\n/* Embedded content\n   ========================================================================== */\n\n/**\n * Remove border when inside `a` element in IE 8/9.\n */\n\nimg {\n  border: 0;\n}\n\n/**\n * Correct overflow displayed oddly in IE 9.\n */\n\nsvg:not(:root) {\n  overflow: hidden;\n}\n\n/* Grouping content\n   ========================================================================== */\n\n/**\n * Address margin not present in IE 8/9 and Safari 5.\n */\n\nfigure {\n  margin: 1em 40px;\n}\n\n/**\n * Address differences between Firefox and other browsers.\n */\n\nhr {\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n  height: 0;\n}\n\n/**\n * Contain overflow in all browsers.\n 
*/\n\npre {\n  overflow: auto;\n}\n\n/**\n * Address odd `em`-unit font size rendering in all browsers.\n */\n\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\n\n/* Forms\n   ========================================================================== */\n\n/**\n * Known limitation: by default, Chrome and Safari on OS X allow very limited\n * styling of `select`, unless a `border` property is set.\n */\n\n/**\n * 1. Correct color not being inherited.\n *    Known issue: affects color of disabled elements.\n * 2. Correct font properties not being inherited.\n * 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit; /* 1 */\n  font: inherit; /* 2 */\n  margin: 0; /* 3 */\n}\n\n/**\n * Address `overflow` set to `hidden` in IE 8/9/10.\n */\n\nbutton {\n  overflow: visible;\n}\n\n/**\n * Address inconsistent `text-transform` inheritance for `button` and `select`.\n * All other form control elements do not inherit `text-transform` values.\n * Correct `button` style inheritance in Firefox, IE 8+, and Opera\n * Correct `select` style inheritance in Firefox.\n */\n\nbutton,\nselect {\n  text-transform: none;\n}\n\n/**\n * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n *    and `video` controls.\n * 2. Correct inability to style clickable `input` types in iOS.\n * 3. Improve usability and consistency of cursor style between image-type\n *    `input` and others.\n */\n\nbutton,\nhtml input[type=\"button\"], /* 1 */\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button; /* 2 */\n  cursor: pointer; /* 3 */\n}\n\n/**\n * Re-set default cursor for disabled elements.\n */\n\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\n\n/**\n * Remove inner padding and border in Firefox 4+.\n */\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\n\n/**\n * Address Firefox 4+ setting `line-height` on `input` using `!important` in\n * the UA stylesheet.\n */\n\ninput {\n  line-height: normal;\n}\n\n/**\n * It's recommended that you don't attempt to style these elements.\n * Firefox's implementation doesn't respect box-sizing, padding, or width.\n *\n * 1. Address box sizing set to `content-box` in IE 8/9/10.\n * 2. Remove excess padding in IE 8/9/10.\n */\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box; /* 1 */\n  padding: 0; /* 2 */\n}\n\n/**\n * Fix the cursor style for Chrome's increment/decrement buttons. For certain\n * `font-size` values of the `input`, it causes the cursor style of the\n * decrement button to change from `default` to `text`.\n */\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\n\n/**\n * 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome.\n * 2. 
Address `box-sizing` set to `border-box` in Safari 5 and Chrome\n *    (include `-moz` to future-proof).\n */\n\ninput[type=\"search\"] {\n  -webkit-appearance: textfield; /* 1 */\n  -moz-box-sizing: content-box;\n  -webkit-box-sizing: content-box; /* 2 */\n  box-sizing: content-box;\n}\n\n/**\n * Remove inner padding and search cancel button in Safari and Chrome on OS X.\n * Safari (but not Chrome) clips the cancel button when the search input has\n * padding (and `textfield` appearance).\n */\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\n\n/**\n * Define consistent border, margin, and padding.\n */\n\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\n\n/**\n * 1. Correct `color` not being inherited in IE 8/9.\n * 2. Remove padding so people aren't caught out if they zero out fieldsets.\n */\n\nlegend {\n  border: 0; /* 1 */\n  padding: 0; /* 2 */\n}\n\n/**\n * Remove default vertical scrollbar in IE 8/9.\n */\n\ntextarea {\n  overflow: auto;\n}\n\n/**\n * Don't inherit the `font-weight` (applied by a rule above).\n * NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n */\n\noptgroup {\n  font-weight: bold;\n}\n\n/* Tables\n   ========================================================================== */\n\n/**\n * Remove most spacing between table cells.\n */\n\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\n\ntd,\nth {\n  padding: 0;\n}\n"
        },
        "$:/themes/tiddlywiki/vanilla/settings/fontfamily": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/fontfamily",
            "text": "\"Helvetica Neue\", Helvetica, Arial, \"Lucida Grande\", \"DejaVu Sans\", sans-serif"
        },
        "$:/themes/tiddlywiki/vanilla/settings/codefontfamily": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/codefontfamily",
            "text": "Monaco, Consolas, \"Lucida Console\", \"DejaVu Sans Mono\", monospace"
        },
        "$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment",
            "text": "fixed"
        },
        "$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize",
            "text": "auto"
        },
        "$:/themes/tiddlywiki/vanilla/sticky": {
            "title": "$:/themes/tiddlywiki/vanilla/sticky",
            "text": "<$reveal state=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\" type=\"match\" text=\"yes\">\n``\n.tc-tiddler-title {\n\tposition: -webkit-sticky;\n\tposition: -moz-sticky;\n\tposition: -o-sticky;\n\tposition: -ms-sticky;\n\tposition: sticky;\n\ttop: 0px;\n\tbackground: ``<<colour tiddler-background>>``;\n\tz-index: 500;\n}\n``\n</$reveal>\n"
        },
        "$:/themes/tiddlywiki/vanilla/themetweaks": {
            "title": "$:/themes/tiddlywiki/vanilla/themetweaks",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ThemeTweaks/ThemeTweaks}}",
            "text": "\\define lingo-base() $:/language/ThemeTweaks/\n\n\\define replacement-text()\n[img[$(imageTitle)$]]\n\\end\n\n\\define backgroundimage-dropdown()\n<div class=\"tc-drop-down-wrapper\">\n<$button popup=<<qualify \"$:/state/popup/themetweaks/backgroundimage\">> class=\"tc-btn-invisible tc-btn-dropdown\">{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/themetweaks/backgroundimage\">> type=\"popup\" position=\"belowleft\" text=\"\" default=\"\">\n<div class=\"tc-drop-down\">\n<$macrocall $name=\"image-picker\" actions=\"\"\"\n\n<$action-setfield\n\t$tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\"\n\t$value=<<imageTitle>>\n/>\n\n\"\"\"/>\n</div>\n</$reveal>\n</div>\n\\end\n\n\\define backgroundimageattachment-dropdown()\n<$select tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment\" default=\"scroll\">\n<option value=\"scroll\"><<lingo Settings/BackgroundImageAttachment/Scroll>></option>\n<option value=\"fixed\"><<lingo Settings/BackgroundImageAttachment/Fixed>></option>\n</$select>\n\\end\n\n\\define backgroundimagesize-dropdown()\n<$select tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize\" default=\"scroll\">\n<option value=\"auto\"><<lingo Settings/BackgroundImageSize/Auto>></option>\n<option value=\"cover\"><<lingo Settings/BackgroundImageSize/Cover>></option>\n<option value=\"contain\"><<lingo Settings/BackgroundImageSize/Contain>></option>\n</$select>\n\\end\n\n<<lingo ThemeTweaks/Hint>>\n\n! <<lingo Options>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\"><<lingo Options/SidebarLayout>></$link> |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\"><option value=\"fixed-fluid\"><<lingo Options/SidebarLayout/Fixed-Fluid>></option><option value=\"fluid-fixed\"><<lingo Options/SidebarLayout/Fluid-Fixed>></option></$select> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\"><<lingo Options/StickyTitles>></$link><br>//<<lingo Options/StickyTitles/Hint>>// |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\"><option value=\"no\">{{$:/language/No}}</option><option value=\"yes\">{{$:/language/Yes}}</option></$select> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/codewrapping\"><<lingo Options/CodeWrapping>></$link> |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/codewrapping\"><option value=\"pre\">{{$:/language/No}}</option><option value=\"pre-wrap\">{{$:/language/Yes}}</option></$select> |\n\n! 
<<lingo Settings>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/fontfamily\"><<lingo Settings/FontFamily>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/fontfamily\" default=\"\" tag=\"input\"/> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/codefontfamily\"><<lingo Settings/CodeFontFamily>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/codefontfamily\" default=\"\" tag=\"input\"/> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\"><<lingo Settings/BackgroundImage>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\" default=\"\" tag=\"input\"/> |<<backgroundimage-dropdown>> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment\"><<lingo Settings/BackgroundImageAttachment>></$link> |<<backgroundimageattachment-dropdown>> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize\"><<lingo Settings/BackgroundImageSize>></$link> |<<backgroundimagesize-dropdown>> | |\n\n! <<lingo Metrics>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/fontsize\"><<lingo Metrics/FontSize>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/fontsize\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/lineheight\"><<lingo Metrics/LineHeight>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/lineheight\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize\"><<lingo Metrics/BodyFontSize>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/bodylineheight\"><<lingo Metrics/BodyLineHeight>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/bodylineheight\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storyleft\"><<lingo Metrics/StoryLeft>></$link><br>//<<lingo Metrics/StoryLeft/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storyleft\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storytop\"><<lingo Metrics/StoryTop>></$link><br>//<<lingo Metrics/StoryTop/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storytop\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storyright\"><<lingo Metrics/StoryRight>></$link><br>//<<lingo Metrics/StoryRight/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storyright\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storywidth\"><<lingo Metrics/StoryWidth>></$link><br>//<<lingo Metrics/StoryWidth/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storywidth\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\"><<lingo Metrics/TiddlerWidth>></$link><br>//<<lingo Metrics/TiddlerWidth/Hint>>//<br> |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint\"><<lingo Metrics/SidebarBreakpoint>></$link><br>//<<lingo Metrics/SidebarBreakpoint/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth\"><<lingo Metrics/SidebarWidth>></$link><br>//<<lingo 
Metrics/SidebarWidth/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth\" default=\"\" tag=\"input\"/> |\n"
        }
    }
}
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1501.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-5|PAPER Tue-1-2-5 — Sum-Product Networks for Robust Automatic Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sum-Product Networks for Robust Automatic Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1551.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-7|PAPER Wed-2-5-7 — A Deep Learning-Based Kalman Filter for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning-Based Kalman Filter for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1891.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-3|PAPER Mon-2-8-3 — Contrastive Predictive Coding of Audio with an Adversary]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contrastive Predictive Coding of Audio with an Adversary</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1140.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-2|PAPER Wed-2-10-2 — Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-5|PAPER Wed-3-3-5 — Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Disfluency in Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-7|PAPER Wed-2-10-7 — Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-8|PAPER Mon-3-7-8 — Black-Box Adaptation of ASR for Accented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Adaptation of ASR for Accented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-5|PAPER Wed-SS-2-3-5 — FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-5|PAPER Mon-3-2-5 — Competency Evaluation in Voice Mimicking Using Acoustic Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competency Evaluation in Voice Mimicking Using Acoustic Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1784.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-2|PAPER Thu-1-3-2 — Finnish ASR with Deep Transformer Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finnish ASR with Deep Transformer Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2880.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-10|PAPER Wed-2-1-10 — BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-9|PAPER Wed-2-10-9 — Surfboard: Audio Feature Extraction for Modern Machine Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surfboard: Audio Feature Extraction for Modern Machine Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3217.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-10|PAPER Wed-2-10-10 — Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-5|PAPER Wed-2-10-5 — Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2143.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-3|PAPER Thu-2-11-3 — HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1416.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-4|PAPER Mon-2-7-4 — TTS Skins: Speaker Conversion via ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TTS Skins: Speaker Conversion via ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-7|PAPER Mon-2-7-7 — Unsupervised Cross-Domain Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Cross-Domain Singing Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-5|PAPER Mon-3-2-5 — Competency Evaluation in Voice Mimicking Using Acoustic Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competency Evaluation in Voice Mimicking Using Acoustic Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-10|PAPER Thu-2-11-10 — Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-5|PAPER Wed-SS-2-3-5 — FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1578.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-3|PAPER Thu-3-11-3 — Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-4|PAPER Mon-2-1-4 — An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1184.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-1|PAPER Mon-2-3-1 — RECOApy: Data Recording, Pre-Processing and Phonetic Transcription for End-to-End Speech-Based Applications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RECOApy: Data Recording, Pre-Processing and Phonetic Transcription for End-to-End Speech-Based Applications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2236.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-5|PAPER Mon-1-4-5 — Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-4|PAPER Mon-1-11-4 — What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2357.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-7|PAPER Mon-2-3-7 — Word Error Rate Estimation Without ASR Output: e-WER2]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Word Error Rate Estimation Without ASR Output: e-WER2</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2271.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-5|PAPER Wed-1-10-5 — Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2421.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-7|PAPER Wed-1-11-7 — Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2073.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-4|PAPER Wed-1-1-4 — Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1794.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-2|PAPER Mon-1-4-2 — Poetic Meter Classification Using i-Vector-MTF Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Poetic Meter Classification Using i-Vector-MTF Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2906.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-6|PAPER Mon-1-11-6 — Learning Intonation Pattern Embeddings for Arabic Dialect Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Intonation Pattern Embeddings for Arabic Dialect Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-3|PAPER Thu-2-9-3 — Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-6|PAPER Mon-3-7-6 — Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-1|PAPER Wed-3-8-1 — Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2674.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-3|PAPER Thu-2-10-3 — Audiovisual Correspondence Learning in Humans and Machines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audiovisual Correspondence Learning in Humans and Machines</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-5|PAPER Mon-3-2-5 — Competency Evaluation in Voice Mimicking Using Acoustic Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competency Evaluation in Voice Mimicking Using Acoustic Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1784.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-2|PAPER Thu-1-3-2 — Finnish ASR with Deep Transformer Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finnish ASR with Deep Transformer Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-9|PAPER Mon-1-4-9 — Nonlinear ISA with Auxiliary Variables for Learning Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear ISA with Auxiliary Variables for Learning Speech Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-7|PAPER Thu-3-5-7 — Style Variation as a Vantage Point for Code-Switching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Variation as a Vantage Point for Code-Switching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1855.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-6|PAPER Wed-2-8-6 — A New Training Pipeline for an Improved Neural Transducer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Training Pipeline for an Improved Neural Transducer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-6|PAPER Wed-SS-1-4-6 — Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-8|PAPER Wed-SS-1-4-8 — Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2833.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-10|PAPER Wed-SS-1-6-10 — The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-4|PAPER Mon-1-8-4 — Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1899.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-5|PAPER Thu-3-2-5 — Development of a Speech Quality Database Under Uncontrolled Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of a Speech Quality Database Under Uncontrolled Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2888.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-5|PAPER Tue-1-9-5 — Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-4|PAPER Wed-SS-2-3-4 — “This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1788.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-5|PAPER Thu-1-11-5 — Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1863.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-9|PAPER Wed-1-5-9 — High Performance Sequence-to-Sequence Model for Streaming Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Performance Sequence-to-Sequence Model for Streaming Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1824.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-7|PAPER Wed-3-4-7 — Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1461.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-2|PAPER Wed-3-4-2 — Conditional Spoken Digit Generation with StyleGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Spoken Digit Generation with StyleGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-6|PAPER Mon-3-3-6 — Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2655.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-9|PAPER Mon-3-4-9 — Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-5|PAPER Wed-3-10-5 — Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-8|PAPER Wed-3-7-8 — Real Time Speech Enhancement in the Waveform Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Speech Enhancement in the Waveform Domain</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-7|PAPER Wed-3-12-7 — Resource-Adaptive Deep Learning for Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Resource-Adaptive Deep Learning for Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2819.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-3|PAPER Thu-3-1-3 — Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1090.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-2|PAPER Thu-SS-2-5-2 — Extrapolating False Alarm Rates in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extrapolating False Alarm Rates in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1399.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-1|PAPER Mon-3-5-1 — Singing Synthesis: With a Little Help from my Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Synthesis: With a Little Help from my Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-4|PAPER Tue-1-2-4 — Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-3|PAPER Thu-1-7-3 — Training Speaker Enrollment Models by Network Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Speaker Enrollment Models by Network Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2121.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-9|PAPER Mon-3-3-9 — Automatic Estimation of Intelligibility Measure for Consonants in Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Intelligibility Measure for Consonants in Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-5|PAPER Thu-3-3-5 — Privacy Guarantees for De-Identifying Text Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy Guarantees for De-Identifying Text Transformations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2696.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-7|PAPER Thu-2-3-7 — Phonetic Entrainment in Cooperative Dialogues: A Case of Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Entrainment in Cooperative Dialogues: A Case of Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1263.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-5|PAPER Tue-1-4-5 — Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-5|PAPER Wed-3-3-5 — Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2935.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-6|PAPER Thu-2-11-6 — Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-9|PAPER Mon-1-4-9 — Nonlinear ISA with Auxiliary Variables for Learning Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear ISA with Auxiliary Variables for Learning Speech Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-5|PAPER Wed-2-12-5 — Multi-Task Learning for Voice Related Recognition Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for Voice Related Recognition Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-2|PAPER Tue-1-4-2 — Categorization of Whistled Consonants by French Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Categorization of Whistled Consonants by French Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-3|PAPER Tue-1-4-3 — Whistled Vowel Identification by French Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whistled Vowel Identification by French Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-6|PAPER Mon-1-4-6 — Enhancing Formant Information in Spectrographic Display of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Formant Information in Spectrographic Display of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1497.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-3|PAPER Mon-1-8-3 — Anti-Aliasing Regularization in Stacking Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Aliasing Regularization in Stacking Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1991.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-5|PAPER Wed-3-9-5 — Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1601.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-6|PAPER Thu-3-1-6 — Prediction of Sleepiness Ratings from Voice by Man and Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prediction of Sleepiness Ratings from Voice by Man and Machine</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1855.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-6|PAPER Wed-2-8-6 — A New Training Pipeline for an Improved Neural Transducer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Training Pipeline for an Improved Neural Transducer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-10|PAPER Mon-1-10-10 — FT SPEECH: Danish Parliament Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FT SPEECH: Danish Parliament Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-3|PAPER Wed-SS-1-4-3 — Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3115.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-9|PAPER Wed-2-4-9 — Unsupervised Audio Source Separation Using Generative Priors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Audio Source Separation Using Generative Priors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-5|PAPER Wed-3-2-5 — Towards Speech Robustness for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Speech Robustness for Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-4|PAPER Mon-1-8-4 — Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1899.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-5|PAPER Thu-3-2-5 — Development of a Speech Quality Database Under Uncontrolled Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of a Speech Quality Database Under Uncontrolled Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1212.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-1|PAPER Thu-SS-1-6-1 — Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-9|PAPER Wed-1-10-9 — Now You’re Speaking My Language: Visual Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Now You’re Speaking My Language: Visual Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-5|PAPER Thu-1-8-5 — Ensemble Approaches for Uncertainty in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Approaches for Uncertainty in Spoken Language Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-5|PAPER Thu-3-7-5 — Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-1|PAPER Thu-3-8-1 — Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-6|PAPER Thu-SS-1-6-6 — Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-7|PAPER Thu-SS-1-6-7 — Understanding Self-Attention of Self-Supervised Audio Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding Self-Attention of Self-Supervised Audio Transformers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-9|PAPER Wed-2-1-9 — Towards Automatic Assessment of Voice Disorders: A Clinical Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Automatic Assessment of Voice Disorders: A Clinical Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1841.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-4|PAPER Mon-2-3-4 — Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-6|PAPER Tue-1-5-6 — Phase Based Spectro-Temporal Features for Building a Robust ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Based Spectro-Temporal Features for Building a Robust ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-10|PAPER Tue-1-5-10 — Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1175.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-8|PAPER Thu-1-5-8 — Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-1|PAPER Thu-2-6-1 — Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2888.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-5|PAPER Tue-1-9-5 — Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2299.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-2|PAPER Thu-3-3-2 — Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2833.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-10|PAPER Wed-SS-1-6-10 — The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-8|PAPER Tue-1-1-8 — Self-Supervised Representations Improve End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Representations Improve End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-6|PAPER Tue-1-10-6 — An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2674.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-3|PAPER Thu-2-10-3 — Audiovisual Correspondence Learning in Humans and Machines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audiovisual Correspondence Learning in Humans and Machines</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1497.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-3|PAPER Mon-1-8-3 — Anti-Aliasing Regularization in Stacking Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Aliasing Regularization in Stacking Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2298.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-10|PAPER Tue-1-3-10 — Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2298.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-10|PAPER Tue-1-3-10 — Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-3|PAPER Thu-1-7-3 — Training Speaker Enrollment Models by Network Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Speaker Enrollment Models by Network Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-9|PAPER Wed-1-7-9 — JukeBox: A Multilingual Singer Recognition Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JukeBox: A Multilingual Singer Recognition Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-10|PAPER Tue-1-5-10 — Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-10|PAPER Wed-1-3-10 — A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2663.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-8|PAPER Wed-2-11-8 — Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2731.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-3|PAPER Thu-3-7-3 — Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Analysis of Speech Prosody in Dutch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-3|PAPER Wed-SS-1-6-3 — To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-7|PAPER Mon-1-9-7 — Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1222.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-3|PAPER Mon-3-10-3 — Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-4|PAPER Wed-2-10-4 — Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-7|PAPER Wed-2-10-7 — Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1323.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-1|PAPER Wed-3-10-1 — Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-2|PAPER Wed-3-10-2 — Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2654.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-7|PAPER Wed-2-11-7 — On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-4|PAPER Mon-2-8-4 — Memory Controlled Sequential Self Attention for Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Memory Controlled Sequential Self Attention for Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-4|PAPER Wed-SS-2-3-4 — “This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-2|PAPER Wed-2-9-2 — Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1497.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-3|PAPER Mon-1-8-3 — Anti-Aliasing Regularization in Stacking Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Aliasing Regularization in Stacking Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-9|PAPER Wed-1-7-9 — JukeBox: A Multilingual Singer Recognition Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JukeBox: A Multilingual Singer Recognition Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1578.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-3|PAPER Thu-3-11-3 — Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-4|PAPER Mon-3-8-4 — Stochastic Talking Face Generation Using Latent Distribution Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Talking Face Generation Using Latent Distribution Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-2|PAPER Thu-1-2-2 — Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2561.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-4|PAPER Thu-2-11-4 — Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2734.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-4|PAPER Thu-3-3-4 — Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-7|PAPER Thu-2-4-7 — HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2989.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-7|PAPER Thu-2-11-7 — Sparse Mixture of Local Experts for Efficient Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sparse Mixture of Local Experts for Efficient Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-1|PAPER Mon-2-9-1 — End-to-End Neural Transformer Based Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Transformer Based Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1991.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-5|PAPER Wed-3-9-5 — Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-6|PAPER Thu-3-8-6 — Language Model Data Augmentation Based on Text Domain Transfer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Model Data Augmentation Based on Text Domain Transfer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-3|PAPER Wed-3-8-3 — Similarity-and-Independence-Aware Beamformer: Method for Target Source Extraction Using Magnitude Spectrogram as Reference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Similarity-and-Independence-Aware Beamformer: Method for Target Source Extraction Using Magnitude Spectrogram as Reference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-9|PAPER Wed-1-7-9 — JukeBox: A Multilingual Singer Recognition Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JukeBox: A Multilingual Singer Recognition Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-9|PAPER Thu-3-2-9 — Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-4|PAPER Thu-1-4-4 — A Noise Robust Technique for Detecting Vowels in Speech Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise Robust Technique for Detecting Vowels in Speech Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2829.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-9|PAPER Thu-SS-1-6-9 — Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2326.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-6|PAPER Thu-1-4-6 — VOP Detection in Variable Speech Rate Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VOP Detection in Variable Speech Rate Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-6|PAPER Mon-1-4-6 — Enhancing Formant Information in Spectrographic Display of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Formant Information in Spectrographic Display of Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2462.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-8|PAPER Thu-3-11-8 — Instantaneous Time Delay Estimation of Broadband Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Time Delay Estimation of Broadband Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2462.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-8|PAPER Thu-3-11-8 — Instantaneous Time Delay Estimation of Broadband Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Time Delay Estimation of Broadband Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2665.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-6|PAPER Wed-2-9-6 — An Open Source Implementation of ITU-T Recommendation P.808 with Validation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open Source Implementation of ITU-T Recommendation P.808 with Validation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-7|PAPER Mon-1-11-7 — Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-7|PAPER Mon-1-12-7 — VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-9|PAPER Wed-SS-1-6-9 — Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-10|PAPER Mon-1-10-10 — FT SPEECH: Danish Parliament Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FT SPEECH: Danish Parliament Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2353.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-6|PAPER Mon-2-4-6 — Microprosodic Variability in Plosives in German and Austrian German]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microprosodic Variability in Plosives in German and Austrian German</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-6|PAPER Tue-1-10-6 — An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-9|PAPER Mon-1-4-9 — Nonlinear ISA with Auxiliary Variables for Learning Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear ISA with Auxiliary Variables for Learning Speech Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emitting Word Timings with End-to-End Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-9|PAPER Thu-3-5-9 — Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-4|PAPER Thu-3-7-4 — Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1192.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-2|PAPER Mon-2-5-2 — CAM: Uninteresting Speech Detector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAM: Uninteresting Speech Detector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1794.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-2|PAPER Mon-1-4-2 — Poetic Meter Classification Using i-Vector-MTF Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Poetic Meter Classification Using i-Vector-MTF Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2942.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-7|PAPER Tue-1-10-7 — Lexical Stress in Urdu]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexical Stress in Urdu</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1598.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-10|PAPER Thu-3-6-10 — Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-10|PAPER Mon-3-4-10 — Deep Learning Based Open Set Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Open Set Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-3|PAPER Wed-SS-1-6-3 — To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2629.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-8|PAPER Wed-2-6-8 — Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-2|PAPER Thu-3-7-2 — Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2362.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-6|PAPER Thu-3-2-6 — Evaluating the Reliability of Acoustic Speech Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating the Reliability of Acoustic Speech Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-8|PAPER Wed-2-5-8 — Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-8|PAPER Thu-2-4-8 — Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Analysis of Speech Prosody in Dutch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-2|PAPER Wed-3-1-2 — An Effective End-to-End Modeling Approach for Mispronunciation Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective End-to-End Modeling Approach for Mispronunciation Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1616.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-3|PAPER Wed-3-1-3 — An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-7|PAPER Mon-1-11-7 — Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2631.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-4|PAPER Wed-SS-1-12-4 — Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-6|PAPER Wed-3-10-6 — Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-13|PAPER Wed-3-10-13 — The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1616.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-3|PAPER Wed-3-1-3 — An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1391.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-1|PAPER Thu-2-2-1 — Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-1|PAPER Wed-2-11-1 — Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1633.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-6|PAPER Mon-1-3-6 — Cortical Oscillatory Hierarchy for Natural Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cortical Oscillatory Hierarchy for Natural Sentence Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3146.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-9|PAPER Wed-3-12-9 — Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1277.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-9|PAPER Mon-2-5-9 — Joint Prediction of Punctuation and Disfluency in Speech Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Prediction of Punctuation and Disfluency in Speech Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-1|PAPER Wed-3-1-1 — Automatic Scoring at Multi-Granularity for L2 Pronunciation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Scoring at Multi-Granularity for L2 Pronunciation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1284.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-2|PAPER Thu-2-9-2 — Joint Detection of Sentence Stress and Phrase Boundary for Prosody]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Detection of Sentence Stress and Phrase Boundary for Prosody</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1710.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-5|PAPER Mon-2-7-5 — GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-4|PAPER Mon-3-11-4 — X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-8|PAPER Wed-1-8-8 — SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1869.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-1|PAPER Mon-2-1-1 — Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-4|PAPER Mon-2-1-4 — An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2655.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-9|PAPER Mon-3-4-9 — Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1552.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-2|PAPER Wed-SS-1-4-2 — Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-5|PAPER Wed-3-2-5 — Towards Speech Robustness for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Speech Robustness for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2531.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-1|PAPER Thu-3-1-1 — Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2904.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-9|PAPER Tue-1-5-9 — Bandpass Noise Generation and Augmentation for Unified ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandpass Noise Generation and Augmentation for Unified ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Look Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1733.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-3|PAPER Mon-2-1-3 — Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2585.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-8|PAPER Wed-3-2-8 — Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2224.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-4|PAPER Thu-2-1-4 — Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2256.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-6|PAPER Thu-3-11-6 — The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-8|PAPER Mon-2-3-8 — An Evaluation of Manual and Semi-Automatic Laughter Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of Manual and Semi-Automatic Laughter Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-4|PAPER Thu-3-7-4 — Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1058.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-3|PAPER Wed-3-9-3 — MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-7|PAPER Mon-2-8-7 — A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4012.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-7|PAPER Mon-2-12-7 — A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1797.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-8|PAPER Mon-2-2-8 — Continual Learning for Multi-Dialect Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continual Learning for Multi-Dialect Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2650.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-7|PAPER Thu-1-7-7 — ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-3|PAPER Mon-2-2-3 — Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-2|PAPER Thu-3-5-2 — Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Based Wakeword-Independent Verification System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Robust Word-Level Wakeword Verification Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-2|PAPER Wed-2-11-2 — Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2103.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-6|PAPER Mon-1-5-6 — What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|^<div class="cpauthorindexpersoncardpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1779.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-8|PAPER Wed-1-9-8 — Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-3|PAPER Tue-SS-1-6-3 — X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2694.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-5|PAPER Mon-2-1-5 — Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2444.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-4|PAPER Tue-1-9-4 — The MSP-Conversation Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The MSP-Conversation Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2636.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-1|PAPER Wed-1-9-1 — An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Analysis of Speech Prosody in Dutch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2748.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-6|PAPER Mon-3-9-6 — A Sound Engineering Approach to Near End Listening Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Sound Engineering Approach to Near End Listening Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-3|PAPER Mon-3-9-3 — Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-12|PAPER Thu-3-7-12 — Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-3|PAPER Tue-1-10-3 — Scaling Processes of Clause Chains in Pitjantjatjara]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Processes of Clause Chains in Pitjantjatjara</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-9|PAPER Thu-2-3-9 — Tone Variations in Regionally Accented Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Variations in Regionally Accented Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-10|PAPER Thu-1-4-10 — Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2842.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-7|PAPER Thu-1-8-7 — ASR-Based Evaluation and Feedback for Individualized Reading Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Based Evaluation and Feedback for Individualized Reading Practice</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-6|PAPER Tue-1-5-6 — Phase Based Spectro-Temporal Features for Building a Robust ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Based Spectro-Temporal Features for Building a Robust ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-1|PAPER Mon-1-3-1 — Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-10|PAPER Wed-2-2-10 — Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-1|PAPER Thu-2-11-1 — Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-8|PAPER Tue-1-1-8 — Self-Supervised Representations Improve End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Representations Improve End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-1|PAPER Thu-3-5-1 — Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-9|PAPER Mon-3-2-9 — Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-10|PAPER Thu-3-10-10 — Exploring Transformers for Large-Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Transformers for Large-Scale Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-3|PAPER Wed-3-2-3 — Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-7|PAPER Mon-2-8-7 — A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-8|PAPER Mon-2-8-8 — Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-5|PAPER Mon-3-4-5 — Acoustic Scene Analysis with Multi-Head Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Analysis with Multi-Head Attention Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-1|PAPER Wed-2-8-1 — Semi-Supervised ASR by End-to-End Self-Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised ASR by End-to-End Self-Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-8|PAPER Thu-2-10-8 — Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-1|PAPER Wed-1-5-1 — 1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1586.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-9|PAPER Thu-3-8-9 — Language Modeling for Speech Analytics in Under-Resourced Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling for Speech Analytics in Under-Resourced Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-9|PAPER Wed-SS-1-6-9 — Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-7|PAPER Wed-2-2-7 — Multi-Scale Convolution for Robust Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Convolution for Robust Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1617.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-3|PAPER Mon-3-3-3 — Lite Audio-Visual Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lite Audio-Visual Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2028.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-5|PAPER Mon-3-11-5 — Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-2|PAPER Thu-1-11-2 — Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1315.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-8|PAPER Tue-1-8-8 — DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1260.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-4|PAPER Thu-1-10-4 — A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-8|PAPER Thu-2-3-8 — Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-8|PAPER Mon-1-9-8 — Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1733.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-3|PAPER Mon-2-1-3 — Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-9|PAPER Mon-2-1-9 — Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2585.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-8|PAPER Wed-3-2-8 — Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1714.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-7|PAPER Thu-2-2-7 — Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1570.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-1|PAPER Thu-2-4-1 — SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-2|PAPER Thu-2-4-2 — An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-7|PAPER Mon-3-1-7 — Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-7|PAPER Mon-2-8-7 — A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-8|PAPER Mon-2-8-8 — Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-6|PAPER Mon-3-4-6 — Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2472.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-5|PAPER Thu-2-2-5 — Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2836.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-1|PAPER Wed-2-1-1 — The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2928.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-4|PAPER Thu-3-10-4 — Transformer-Based Long-Context End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer-Based Long-Context End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-7|PAPER Thu-2-4-7 — HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2629.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-8|PAPER Wed-2-6-8 — Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2480.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-6|PAPER Wed-3-12-6 — Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1766.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-3|PAPER Thu-1-8-3 — Targeted Content Feedback in Spoken Language Learning and Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Targeted Content Feedback in Spoken Language Learning and Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-8|PAPER Mon-2-8-8 — Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1733.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-3|PAPER Mon-2-1-3 — Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-6|PAPER Wed-2-11-6 — Tone Learning in Low-Resource Bilingual TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Learning in Low-Resource Bilingual TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-1|PAPER Thu-1-11-1 — From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-5|PAPER Wed-SS-1-4-5 — Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-3|PAPER Wed-SS-1-4-3 — Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-3|PAPER Mon-3-11-3 — Multimodal Target Speech Separation with Voice and Face References]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Target Speech Separation with Voice and Face References</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Focal Loss for Punctuation Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1984.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-5|PAPER Mon-3-8-5 — Speech-to-Singing Conversion Based on Boundary Equilibrium GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-to-Singing Conversion Based on Boundary Equilibrium GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-4|PAPER Thu-3-4-4 — VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-2|PAPER Mon-2-8-2 — Environmental Sound Classification with Parallel Temporal-Spectral Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environmental Sound Classification with Parallel Temporal-Spectral Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2512.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-8|PAPER Mon-2-7-8 — Attention-Based Speaker Embeddings for One-Shot Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Speaker Embeddings for One-Shot Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2566.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-9|PAPER Thu-2-9-9 — Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1232.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-2|PAPER Thu-3-4-2 — Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-10|PAPER Thu-2-9-10 — Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-8|PAPER Wed-1-10-8 — Perception and Production of Mandarin Initial Stops by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception and Production of Mandarin Initial Stops by Native Urdu Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Look Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2819.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-3|PAPER Thu-3-1-3 — Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2985.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-12|PAPER Thu-2-9-12 — Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-5|PAPER Mon-2-2-5 — PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Alternative to MFCCs for ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-1|PAPER Thu-1-3-1 — Neural Language Modeling with Implicit Cache Pointers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Language Modeling with Implicit Cache Pointers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition of Compound-Rich Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2696.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-7|PAPER Thu-2-3-7 — Phonetic Entrainment in Cooperative Dialogues: A Case of Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Entrainment in Cooperative Dialogues: A Case of Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-4|PAPER Wed-SS-2-3-4 — “This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2897.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-9|PAPER Thu-1-2-9 — Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-5|PAPER Thu-3-3-5 — Privacy Guarantees for De-Identifying Text Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy Guarantees for De-Identifying Text Transformations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2152.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-5|PAPER Mon-2-8-5 — Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-8|PAPER Thu-1-8-8 — Domain Adversarial Neural Networks for Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adversarial Neural Networks for Dysarthric Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emitting Word Timings with End-to-End Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-2|PAPER Wed-2-9-2 — Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-5|PAPER Mon-3-3-5 — A Deep Learning Approach to Active Noise Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning Approach to Active Noise Control</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2952.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-1|PAPER Wed-3-7-1 — Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2561.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-4|PAPER Thu-2-11-4 — Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-7|PAPER Thu-3-2-7 — Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-9|PAPER Thu-2-3-9 — Tone Variations in Regionally Accented Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Variations in Regionally Accented Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-1|PAPER Mon-1-11-1 — Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-2|PAPER Mon-1-12-2 — Kaldi-Web: An Installation-Free, On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kaldi-Web: An Installation-Free, On-Device Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-9|PAPER Mon-3-7-9 — Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-3|PAPER Thu-2-9-3 — Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2476.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-8|PAPER Mon-3-4-8 — Attention-Driven Projections for Soundscape Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Driven Projections for Soundscape Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-3|PAPER Mon-2-5-3 — Mixed Case Contextual ASR Using Capitalization Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixed Case Contextual ASR Using Capitalization Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1627.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-9|PAPER Tue-1-9-9 — An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-7|PAPER Mon-1-11-7 — Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-5|PAPER Thu-3-3-5 — Privacy Guarantees for De-Identifying Text Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy Guarantees for De-Identifying Text Transformations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2476.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-8|PAPER Mon-3-4-8 — Attention-Driven Projections for Soundscape Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Driven Projections for Soundscape Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Federated Approach in Training Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-12|PAPER Wed-3-10-12 — GAN-Based Data Generation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAN-Based Data Generation for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-6|PAPER Tue-1-10-6 — An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-3|PAPER Tue-1-5-3 — A Deep 2D Convolutional Network for Waveform-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep 2D Convolutional Network for Waveform-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2786.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-10|PAPER Mon-1-5-10 — Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2793.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-7|PAPER Mon-3-9-7 — Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1563.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-2|PAPER Mon-3-3-2 — SEANet: A Multi-Modal Speech Enhancement Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SEANet: A Multi-Modal Speech Enhancement Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-8|PAPER Thu-1-8-8 — Domain Adversarial Neural Networks for Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adversarial Neural Networks for Dysarthric Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1481.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-4|PAPER Wed-2-1-4 — Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2809.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-8|PAPER Thu-3-2-8 — A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-2|PAPER Wed-2-4-2 — On Synthesis for Supervised Monaural Speech Separation in Time Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Synthesis for Supervised Monaural Speech Separation in Time Domain</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2205.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-6|PAPER Wed-2-4-6 — Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-9|PAPER Wed-3-5-9 — Neural Discriminant Analysis for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Discriminant Analysis for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Look Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2152.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-5|PAPER Mon-2-8-5 — Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1246.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-7|PAPER Mon-2-9-7 — Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1089.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-8|PAPER Mon-1-8-8 — Neural Speech Separation Using Spatially Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Separation Using Spatially Distributed Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2359.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-5|PAPER Wed-3-12-5 — TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2572.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-3|PAPER Thu-2-2-3 — Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-4|PAPER Wed-1-3-4 — Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-5|PAPER Thu-3-4-5 — Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2734.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-4|PAPER Thu-3-3-4 — Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3019.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-8|PAPER Mon-1-4-8 — Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2982.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-8|PAPER Wed-1-11-8 — Single-Channel Speech Enhancement by Subspace Affinity Minimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single-Channel Speech Enhancement by Subspace Affinity Minimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1652.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-5|PAPER Mon-1-3-5 — Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-7|PAPER Wed-3-12-7 — Resource-Adaptive Deep Learning for Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Resource-Adaptive Deep Learning for Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-4|PAPER Tue-1-2-4 — Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-3|PAPER Thu-1-7-3 — Training Speaker Enrollment Models by Network Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Speaker Enrollment Models by Network Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-9|PAPER Mon-3-1-9 — A 43 Language Multilingual Punctuation Prediction Neural Network Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A 43 Language Multilingual Punctuation Prediction Neural Network Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1569.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-5|PAPER Thu-1-2-5 — Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-11|PAPER Thu-3-4-11 — Voice Conversion Using Speech-to-Speech Neuro-Style Transfer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Using Speech-to-Speech Neuro-Style Transfer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-14|PAPER Wed-SS-1-6-14 — Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2906.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-6|PAPER Mon-1-11-6 — Learning Intonation Pattern Embeddings for Arabic Dialect Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Intonation Pattern Embeddings for Arabic Dialect Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-4|PAPER Thu-3-11-4 — Online Blind Reverberation Time Estimation Using CRNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Blind Reverberation Time Estimation Using CRNNs</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2171.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-5|PAPER Thu-3-11-5 — Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2880.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-10|PAPER Wed-2-1-10 — BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-9|PAPER Wed-2-10-9 — Surfboard: Audio Feature Extraction for Modern Machine Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surfboard: Audio Feature Extraction for Modern Machine Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-1|PAPER Mon-2-2-1 — Fast and Slow Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Slow Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-9|PAPER Thu-3-9-9 — Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-4|PAPER Mon-2-8-4 — Memory Controlled Sequential Self Attention for Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Memory Controlled Sequential Self Attention for Sound Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1899.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-5|PAPER Thu-3-2-5 — Development of a Speech Quality Database Under Uncontrolled Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of a Speech Quality Database Under Uncontrolled Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2362.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-6|PAPER Thu-3-2-6 — Evaluating the Reliability of Acoustic Speech Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating the Reliability of Acoustic Speech Embeddings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2298.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-10|PAPER Tue-1-3-10 — Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-2|PAPER Mon-1-12-2 — Kaldi-Web: An Installation-Free, On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kaldi-Web: An Installation-Free, On-Device Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-5|PAPER Mon-2-11-5 — On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-9|PAPER Mon-3-7-9 — Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-9|PAPER Wed-3-2-9 — Detecting and Counting Overlapping Speakers in Distant Speech Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Counting Overlapping Speakers in Distant Speech Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0017.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-8|PAPER Mon-2-11-8 — On the Robustness and Training Dynamics of Raw Waveform Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Robustness and Training Dynamics of Raw Waveform Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-1|PAPER Tue-1-5-1 — Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-2|PAPER Thu-1-11-2 — Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-9|PAPER Wed-SS-1-6-9 — Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2664.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-9|PAPER Wed-2-11-9 — Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1586.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-9|PAPER Thu-3-8-9 — Language Modeling for Speech Analytics in Under-Resourced Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling for Speech Analytics in Under-Resourced Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-7|PAPER Tue-1-8-7 — LVCSR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LVCSR with Transformer Language Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1244.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-9|PAPER Thu-2-8-9 — Context-Dependent Acoustic Modeling Without Explicit Phone Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Acoustic Modeling Without Explicit Phone Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-5|PAPER Wed-2-10-5 — Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-6|PAPER Thu-1-5-6 — Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-11|PAPER Thu-3-7-11 — Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1676.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-4|PAPER Tue-1-3-4 — Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-4|PAPER Mon-2-4-4 — Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1257.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-5|PAPER Wed-1-1-5 — Neutral Tone in Changde Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutral Tone in Changde Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-2|PAPER Tue-1-4-2 — Categorization of Whistled Consonants by French Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Categorization of Whistled Consonants by French Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-3|PAPER Tue-1-4-3 — Whistled Vowel Identification by French Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whistled Vowel Identification by French Listeners</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-8|PAPER Wed-1-8-8 — SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1652.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-5|PAPER Mon-1-3-5 — Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-14|PAPER Thu-3-7-14 — Classify Imaginary Mandarin Tones with Cortical EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classify Imaginary Mandarin Tones with Cortical EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-1|PAPER Mon-3-2-1 — Multi-Task Siamese Neural Network for Improving Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Siamese Neural Network for Improving Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1175.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-8|PAPER Thu-1-5-8 — Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2398.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-8|PAPER Thu-1-4-8 — Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2453.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-7|PAPER Mon-2-4-7 — //Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">//Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-4|PAPER Mon-3-10-4 — Coarticulation as Synchronised Sequential Target Approximation: An EMA Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coarticulation as Synchronised Sequential Target Approximation: An EMA Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2842.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-7|PAPER Thu-1-8-7 — ASR-Based Evaluation and Feedback for Individualized Reading Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Based Evaluation and Feedback for Individualized Reading Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-7|PAPER Wed-3-5-7 — Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2892.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-7|PAPER Thu-2-7-7 — A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-8|PAPER Wed-SS-1-4-8 — Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2687.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-9|PAPER Wed-3-8-9 — GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2687.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-9|PAPER Wed-3-8-9 — GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1806.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-4|PAPER Mon-3-5-4 — Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1463.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-6|PAPER Thu-1-1-6 — An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-3|PAPER Wed-SS-1-6-3 — To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-1|PAPER Tue-1-10-1 — Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1473.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-6|PAPER Thu-1-10-6 — Nonlinear Residual Echo Suppression Using a Recurrent Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Using a Recurrent Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1338.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-3|PAPER Mon-2-3-3 — Statistical Testing on ASR Performance via Blockwise Bootstrap]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical Testing on ASR Performance via Blockwise Bootstrap</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1627.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-9|PAPER Tue-1-9-9 — An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2888.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-5|PAPER Tue-1-9-5 — Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-6|PAPER Tue-1-5-6 — Phase Based Spectro-Temporal Features for Building a Robust ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Based Spectro-Temporal Features for Building a Robust ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2349.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-3|PAPER Wed-3-3-3 — Very Short-Term Conflict Intensity Estimation Using Fisher Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Short-Term Conflict Intensity Estimation Using Fisher Vectors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2382.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-6|PAPER Tue-1-7-6 — Deep Learning Based Assessment of Synthetic Speech Naturalness]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Assessment of Synthetic Speech Naturalness</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-8|PAPER Wed-3-7-8 — Real Time Speech Enhancement in the Waveform Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Speech Enhancement in the Waveform Domain</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1434.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-3|PAPER Wed-1-7-3 — Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-8|PAPER Mon-3-2-8 — Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1232.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-2|PAPER Thu-3-4-2 — Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">New Advances in Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2442.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-3|PAPER Wed-1-5-3 — Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-8|PAPER Tue-1-4-8 — Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1633.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-6|PAPER Mon-1-3-6 — Cortical Oscillatory Hierarchy for Natural Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cortical Oscillatory Hierarchy for Natural Sentence Processing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-4|PAPER Mon-2-5-4 — Speech Recognition and Multi-Speaker Diarization of Long Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Recognition and Multi-Speaker Diarization of Long Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Disfluency in Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-5|PAPER Wed-3-7-5 — Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Prosody Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2555.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-5|PAPER Mon-3-5-5 — Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2296.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-6|PAPER Thu-2-8-6 — Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-3|PAPER Mon-2-2-3 — Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2442.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-3|PAPER Wed-1-5-3 — Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2480.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-6|PAPER Wed-3-12-6 — Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-1|PAPER Mon-2-4-1 — Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1335.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-9|PAPER Tue-1-4-9 — Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1336.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-3|PAPER Tue-1-7-3 — Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1339.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-2|PAPER Tue-1-9-2 — Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2819.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-3|PAPER Thu-3-1-3 — Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-4|PAPER Wed-3-8-4 — The Method of Random Directions Optimization for Stereo Audio Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Method of Random Directions Optimization for Stereo Audio Source Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2691.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-9|PAPER Wed-2-6-9 — Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-7|PAPER Wed-3-12-7 — Resource-Adaptive Deep Learning for Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Resource-Adaptive Deep Learning for Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2897.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-9|PAPER Thu-1-2-9 — Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-4|PAPER Tue-1-5-4 — Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1140.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-2|PAPER Wed-2-10-2 — Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2964.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-7|PAPER Tue-1-9-7 — Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-1|PAPER Mon-3-2-1 — Multi-Task Siamese Neural Network for Improving Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Siamese Neural Network for Improving Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2757.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-10|PAPER Wed-3-2-10 — All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3042.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-1|PAPER Tue-1-4-1 — Attention to Indexical Information Improves Voice Recall]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention to Indexical Information Improves Voice Recall</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1434.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-3|PAPER Wed-1-7-3 — Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-7|PAPER Thu-3-2-7 — Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-8|PAPER Wed-3-10-8 — Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-2|PAPER Mon-2-7-2 — Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2588.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-5|PAPER Thu-2-11-5 — Speech Enhancement with Stochastic Temporal Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Stochastic Temporal Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-1|PAPER Wed-2-9-1 — Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-3|PAPER Thu-2-8-3 — Class LM and Word Mapping for Contextual Biasing in End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class LM and Word Mapping for Contextual Biasing in End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-13|PAPER Thu-3-7-13 — Glottal Closure Instants Detection from EGG Signal by Classification Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from EGG Signal by Classification Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-4|PAPER Mon-3-2-4 — Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">New Advances in Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-7|PAPER Tue-1-2-7 — Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-6|PAPER Thu-SS-1-6-6 — Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1356.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-7|PAPER Mon-2-1-7 — Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-2|PAPER Tue-1-7-2 — A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1403.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-1|PAPER Wed-3-4-1 — Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-4|PAPER Wed-2-2-4 — Domain Aware Training for Far-Field Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Aware Training for Far-Field Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-3|PAPER Mon-1-9-3 — Multi-Modal Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Attention for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1810.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-6|PAPER Mon-3-2-6 — Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1814.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-4|PAPER Wed-1-7-4 — Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-6|PAPER Wed-3-10-6 — Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-1|PAPER Thu-SS-2-5-1 — The Attacker’s Perspective on Automatic Speaker Verification: An Overview]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Attacker’s Perspective on Automatic Speaker Verification: An Overview</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2964.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-7|PAPER Tue-1-9-7 — Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-2|PAPER Thu-3-6-2 — Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-2|PAPER Mon-2-1-2 — End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-2|PAPER Thu-3-6-2 — Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1081.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-3|PAPER Wed-2-1-3 — Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2256.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-6|PAPER Thu-3-11-6 — The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-1|PAPER Mon-2-5-1 — Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1606.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-7|PAPER Mon-1-8-7 — Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1459.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-8|PAPER Thu-3-6-8 — Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2152.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-5|PAPER Mon-2-8-5 — Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2408.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-7|PAPER Wed-3-8-7 — A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-7|PAPER Thu-3-2-7 — Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1220.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-2|PAPER Thu-3-11-2 — Spatial Resolution of Early Reflection for Speech and White Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Resolution of Early Reflection for Speech and White Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-2|PAPER Thu-SS-1-6-2 — Vector-Quantized Autoregressive Predictive Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Autoregressive Predictive Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-1|PAPER Wed-2-11-1 — Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-9|PAPER Thu-3-11-9 — U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-5|PAPER Mon-3-3-5 — A Deep Learning Approach to Active Noise Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning Approach to Active Noise Control</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2555.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-5|PAPER Mon-3-5-5 — Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-2|PAPER Mon-3-9-2 — iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-9|PAPER Wed-1-11-9 — Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2739.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-6|PAPER Thu-3-5-6 — Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2637.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-10|PAPER Tue-1-4-10 — Identifying Important Time-Frequency Locations in Continuous Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Important Time-Frequency Locations in Continuous Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-4|PAPER Mon-3-2-4 — Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-8|PAPER Thu-3-1-8 — Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1825.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-9|PAPER Thu-3-6-9 — Recognising Emotions in Dysarthric Speech Using Typical Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognising Emotions in Dysarthric Speech Using Typical Speech Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-1|PAPER Thu-1-5-1 — Discovering Articulatory Speech Targets from Synthesized Random Babble]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discovering Articulatory Speech Targets from Synthesized Random Babble</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-1|PAPER Thu-1-4-1 — Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-4|PAPER Mon-2-8-4 — Memory Controlled Sequential Self Attention for Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Memory Controlled Sequential Self Attention for Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-6|PAPER Mon-1-9-6 — Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-6|PAPER Wed-SS-1-4-6 — Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-2|PAPER Mon-2-8-2 — Environmental Sound Classification with Parallel Temporal-Spectral Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environmental Sound Classification with Parallel Temporal-Spectral Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-10|PAPER Thu-1-4-10 — Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2842.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-7|PAPER Thu-1-8-7 — ASR-Based Evaluation and Feedback for Individualized Reading Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Based Evaluation and Feedback for Individualized Reading Practice</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-10|PAPER Wed-1-3-10 — A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2383.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-2|PAPER Wed-SS-2-7-2 — The “Sound of Silence” in EEG — Cognitive Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The “Sound of Silence” in EEG — Cognitive Voice Activity Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2663.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-8|PAPER Wed-2-11-8 — Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2731.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-3|PAPER Thu-3-7-3 — Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2482.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-6|PAPER Thu-2-6-6 — End-to-End Named Entity Recognition from English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Named Entity Recognition from English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2472.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-5|PAPER Thu-2-2-5 — Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-3|PAPER Mon-3-9-3 — Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-1|PAPER Wed-2-6-1 — Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-2|PAPER Thu-3-7-2 — Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1958.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-1|PAPER Tue-1-8-1 — Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-7|PAPER Tue-1-8-7 — LVCSR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LVCSR with Transformer Language Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1855.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-6|PAPER Wed-2-8-6 — A New Training Pipeline for an Improved Neural Transducer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Training Pipeline for an Improved Neural Transducer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2675.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-6|PAPER Thu-1-2-6 — Early Stage LM Integration Using Local and Global Log-Linear Combination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Stage LM Integration Using Local and Global Log-Linear Combination</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1244.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-9|PAPER Thu-2-8-9 — Context-Dependent Acoustic Modeling Without Explicit Phone Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Acoustic Modeling Without Explicit Phone Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2253.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-10|PAPER Thu-3-9-10 — Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1991.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-5|PAPER Wed-3-9-5 — Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-4|PAPER Wed-2-10-4 — Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-13|PAPER Wed-3-10-13 — The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-7|PAPER Mon-2-2-7 — CTC-Synchronous Training for Monotonic Attention Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CTC-Synchronous Training for Monotonic Attention Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1780.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-7|PAPER Wed-1-5-7 — Enhancing Monotonic Multihead Attention for Streaming ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonic Multihead Attention for Streaming ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-6|PAPER Wed-1-3-6 — CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1642.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-4|PAPER Mon-1-5-4 — Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3127.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-6|PAPER Thu-3-7-6 — Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-4|PAPER Wed-3-3-4 — Gaming Corpus for Studying Social Screams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gaming Corpus for Studying Social Screams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-1|PAPER Wed-SS-2-7-1 — Combining Audio and Brain Activity for Predicting Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Audio and Brain Activity for Predicting Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-10|PAPER Mon-1-4-10 — Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-6|PAPER Mon-2-12-6 — Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3167.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-9|PAPER Wed-1-3-9 — Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2347.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-8|PAPER Thu-1-11-8 — End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1218.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-2|PAPER Mon-3-8-2 — Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-10|PAPER Mon-1-4-10 — Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1232.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-2|PAPER Thu-3-4-2 — Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-1|PAPER Mon-1-3-1 — Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1420.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-4|PAPER Mon-2-10-4 — Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-5|PAPER Tue-1-7-5 — Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-2|PAPER Thu-1-4-2 — Dual Attention in Time and Frequency Domain for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Attention in Time and Frequency Domain for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3097.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-9|PAPER Mon-2-8-9 — Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3146.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-9|PAPER Wed-3-12-9 — Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2075.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-5|PAPER Wed-3-5-5 — Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1065.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-1|PAPER Wed-3-12-1 — FaceFilter: Audio-Visual Speech Separation Using Still Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FaceFilter: Audio-Visual Speech Separation Using Still Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1113.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-2|PAPER Wed-3-12-2 — Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2076.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-3|PAPER Thu-2-7-3 — MIRNet: Learning Multiple Identities Representations in Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MIRNet: Learning Multiple Identities Representations in Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1306.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-3|PAPER Mon-2-10-3 — Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-4|PAPER Wed-1-11-4 — Constrained Ratio Mask for Speech Enhancement Using DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Ratio Mask for Speech Enhancement Using DNN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-8|PAPER Wed-2-5-8 — Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2234.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-8|PAPER Thu-1-10-8 — Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1498.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-8|PAPER Wed-3-3-8 — Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2967.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-5|PAPER Tue-1-3-5 — An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1865.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-3|PAPER Thu-1-9-3 — Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2732.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-6|PAPER Mon-2-2-6 — CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-1|PAPER Mon-2-2-1 — Fast and Slow Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Slow Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Alternative to MFCCs for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1323.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-1|PAPER Wed-3-10-1 — Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1616.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-3|PAPER Wed-3-1-3 — An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1617.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-3|PAPER Mon-3-3-3 — Lite Audio-Visual Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lite Audio-Visual Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-2|PAPER Wed-3-1-2 — An Effective End-to-End Modeling Approach for Mispronunciation Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective End-to-End Modeling Approach for Mispronunciation Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-6|PAPER Mon-3-4-6 — Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-14|PAPER Thu-3-7-14 — Classify Imaginary Mandarin Tones with Cortical EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classify Imaginary Mandarin Tones with Cortical EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2507.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-2|PAPER Wed-2-1-2 — Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2237.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-4|PAPER Thu-2-2-4 — Speech Emotion Recognition with Discriminative Feature Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition with Discriminative Feature Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1714.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-7|PAPER Thu-2-2-7 — Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-4|PAPER Mon-2-5-4 — Speech Recognition and Multi-Speaker Diarization of Long Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Recognition and Multi-Speaker Diarization of Long Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-2|PAPER Tue-1-7-2 — A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2296.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-6|PAPER Thu-2-8-6 — Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2538.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-5|PAPER Thu-1-7-5 — Angular Margin Centroid Loss for Text-Independent Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Angular Margin Centroid Loss for Text-Independent Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-3|PAPER Thu-1-4-3 — Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-8|PAPER Wed-1-8-8 — SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1736.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-5|PAPER Mon-1-5-5 — WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1315.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-8|PAPER Tue-1-8-8 — DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1824.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-7|PAPER Wed-3-4-7 — Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-6|PAPER Thu-SS-1-6-6 — Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-7|PAPER Thu-SS-1-6-7 — Understanding Self-Attention of Self-Supervised Audio Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding Self-Attention of Self-Supervised Audio Transformers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1570.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-1|PAPER Thu-2-4-1 — SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-4|PAPER Thu-3-4-4 — VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-4|PAPER Mon-3-2-4 — Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-4|PAPER Thu-1-1-4 — Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2076.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-3|PAPER Thu-2-7-3 — MIRNet: Learning Multiple Identities Representations in Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MIRNet: Learning Multiple Identities Representations in Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-1|PAPER Mon-3-7-1 — Continual Learning in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continual Learning in Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Alternative to MFCCs for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-4|PAPER Thu-1-1-4 — Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-3|PAPER Tue-1-10-3 — Scaling Processes of Clause Chains in Pitjantjatjara]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Processes of Clause Chains in Pitjantjatjara</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2471.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-2|PAPER Mon-1-1-2 — SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-2|PAPER Thu-3-6-2 — Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-9|PAPER Mon-2-4-9 — Modeling Global Body Configurations in American Sign Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Global Body Configurations in American Sign Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1569.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-5|PAPER Thu-1-2-5 — Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-5|PAPER Mon-2-11-5 — On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2253.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-10|PAPER Thu-3-9-10 — Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}}
</p></div>

{{Author Index Link Row}}
|cpborderless|k
|cpaidxlinkrowtable|k
|<$link to="AUTHOR LIST — A"><div class="cpaidxlinkrowstyle">A</div></$link>|<$link to="AUTHOR LIST — B"><div class="cpaidxlinkrowstyle">B</div></$link>|<$link to="AUTHOR LIST — C"><div class="cpaidxlinkrowstyle">C</div></$link>|<$link to="AUTHOR LIST — D"><div class="cpaidxlinkrowstyle">D</div></$link>|<$link to="AUTHOR LIST — E"><div class="cpaidxlinkrowstyle">E</div></$link>|<$link to="AUTHOR LIST — F"><div class="cpaidxlinkrowstyle">F</div></$link>|<$link to="AUTHOR LIST — G"><div class="cpaidxlinkrowstyle">G</div></$link>|<$link to="AUTHOR LIST — H"><div class="cpaidxlinkrowstyle">H</div></$link>|<$link to="AUTHOR LIST — I"><div class="cpaidxlinkrowstyle">I</div></$link>|<$link to="AUTHOR LIST — J"><div class="cpaidxlinkrowstyle">J</div></$link>|<$link to="AUTHOR LIST — K"><div class="cpaidxlinkrowstyle">K</div></$link>|<$link to="AUTHOR LIST — L"><div class="cpaidxlinkrowstyle">L</div></$link>|<$link to="AUTHOR LIST — M"><div class="cpaidxlinkrowstyle">M</div></$link>|
|<$link to="AUTHOR LIST — N"><div class="cpaidxlinkrowstyle">N</div></$link>|<$link to="AUTHOR LIST — O"><div class="cpaidxlinkrowstyle">O</div></$link>|<$link to="AUTHOR LIST — P"><div class="cpaidxlinkrowstyle">P</div></$link>|<$link to="AUTHOR LIST — Q"><div class="cpaidxlinkrowstyle">Q</div></$link>|<$link to="AUTHOR LIST — R"><div class="cpaidxlinkrowstyle">R</div></$link>|<$link to="AUTHOR LIST — S"><div class="cpaidxlinkrowstyle">S</div></$link>|<$link to="AUTHOR LIST — T"><div class="cpaidxlinkrowstyle">T</div></$link>|<$link to="AUTHOR LIST — U"><div class="cpaidxlinkrowstyle">U</div></$link>|<$link to="AUTHOR LIST — V"><div class="cpaidxlinkrowstyle">V</div></$link>|<$link to="AUTHOR LIST — W"><div class="cpaidxlinkrowstyle">W</div></$link>|<$link to="AUTHOR LIST — X"><div class="cpaidxlinkrowstyle">X</div></$link>|<$link to="AUTHOR LIST — Y"><div class="cpaidxlinkrowstyle">Y</div></$link>|<$link to="AUTHOR LIST — Z"><div class="cpaidxlinkrowstyle">Z</div></$link>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3097.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-9|PAPER Mon-2-8-9 — Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-1|PAPER Mon-1-11-1 — Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-5|PAPER Mon-2-11-5 — On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-8|PAPER Wed-SS-1-4-8 — Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-5|PAPER Mon-3-10-5 — Improved Model for Vocal Folds with a Polyp with Potential Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Model for Vocal Folds with a Polyp with Potential Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-5|PAPER Wed-3-7-5 — Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-1|PAPER Wed-SS-2-7-1 — Combining Audio and Brain Activity for Predicting Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Audio and Brain Activity for Predicting Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-4|PAPER Tue-1-2-4 — Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-4|PAPER Mon-1-8-4 — Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2462.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-8|PAPER Thu-3-11-8 — Instantaneous Time Delay Estimation of Broadband Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Time Delay Estimation of Broadband Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-5|PAPER Mon-3-10-5 — Improved Model for Vocal Folds with a Polyp with Potential Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Model for Vocal Folds with a Polyp with Potential Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2807.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-10|PAPER Mon-1-8-10 — Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2880.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-10|PAPER Wed-2-1-10 — BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-9|PAPER Wed-2-10-9 — Surfboard: Audio Feature Extraction for Modern Machine Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surfboard: Audio Feature Extraction for Modern Machine Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-10|PAPER Wed-3-4-10 — Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-2|PAPER Wed-3-10-2 — Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1395.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-2|PAPER Wed-3-5-2 — How Does Label Noise Affect the Quality of Speaker Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Does Label Noise Affect the Quality of Speaker Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2664.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-9|PAPER Wed-2-11-9 — Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2587.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-5|PAPER Wed-SS-1-6-5 — Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Embedding from Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2326.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-6|PAPER Thu-1-4-6 — VOP Detection in Variable Speech Rate Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VOP Detection in Variable Speech Rate Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1569.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-5|PAPER Thu-1-2-5 — Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition of Compound-Rich Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1420.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-4|PAPER Mon-2-10-4 — Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2152.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-5|PAPER Mon-2-8-5 — Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-2|PAPER Tue-1-1-2 — Efficient Wait-k Models for Simultaneous Machine Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Wait-k Models for Simultaneous Machine Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-7|PAPER Mon-1-4-7 — Unsupervised Methods for Evaluating Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Evaluating Speech Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-4|PAPER Mon-1-11-4 — What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1996.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-5|PAPER Wed-1-7-5 — Multimodal Association for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Association for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-2|PAPER Thu-SS-1-6-2 — Vector-Quantized Autoregressive Predictive Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Autoregressive Predictive Coding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1865.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-3|PAPER Thu-1-9-3 — Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2897.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-9|PAPER Thu-1-2-9 — Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-3|PAPER Mon-3-9-3 — Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-3|PAPER Wed-2-9-3 — Improving Opus Low Bit Rate Quality with Neural Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Opus Low Bit Rate Quality with Neural Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2867.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-10|PAPER Thu-1-1-10 — SpeedySpeech: Efficient Neural Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeedySpeech: Efficient Neural Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1611.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-8|PAPER Tue-1-9-8 — Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1607.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-2|PAPER Tue-1-10-2 — Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-10|PAPER Thu-1-11-10 — Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-6|PAPER Thu-2-3-6 — Cues for Perception of Gender in Synthetic Voices and the Role of Identity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cues for Perception of Gender in Synthetic Voices and the Role of Identity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1547.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-4|PAPER Tue-1-7-4 — Enhancing Sequence-to-Sequence Text-to-Speech with Morphology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Sequence-to-Sequence Text-to-Speech with Morphology</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-10|PAPER Thu-1-11-10 — Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1446.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-6|PAPER Mon-2-10-6 — Self-Attention Encoding and Pooling for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Encoding and Pooling for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-6|PAPER Wed-2-12-6 — Unsupervised Training of Siamese Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Training of Siamese Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3115.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-9|PAPER Wed-2-4-9 — Unsupervised Audio Source Separation Using Generative Priors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Audio Source Separation Using Generative Priors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2236.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-5|PAPER Mon-1-4-5 — Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-5|PAPER Wed-2-12-5 — Multi-Task Learning for Voice Related Recognition Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for Voice Related Recognition Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-3|PAPER Wed-2-9-3 — Improving Opus Low Bit Rate Quality with Neural Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Opus Low Bit Rate Quality with Neural Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2687.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-9|PAPER Wed-3-8-9 — GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-4|PAPER Mon-3-2-4 — Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-3|PAPER Wed-SS-1-6-3 — To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-4|PAPER Thu-1-3-4 — Stochastic Convolutional Recurrent Networks for Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Convolutional Recurrent Networks for Language Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1313.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-1|PAPER Thu-1-9-1 — Stochastic Curiosity Exploration for Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Curiosity Exploration for Dialogue Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-4|PAPER Thu-2-7-4 — Strategies for End-to-End Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strategies for End-to-End Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-2|PAPER Wed-1-3-2 — Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-8|PAPER Mon-1-9-8 — Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2567.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-8|PAPER Tue-1-7-8 — An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-10|PAPER Wed-SS-1-4-10 — Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1252.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-1|PAPER Wed-SS-2-3-1 — Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2650.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-7|PAPER Thu-1-7-7 — ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1068.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-5|PAPER Thu-2-10-5 — Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-3|PAPER Tue-SS-1-6-3 — X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Embedding from Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-5|PAPER Thu-SS-2-5-5 — x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-6|PAPER Thu-SS-2-5-6 — Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3000.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-10|PAPER Thu-3-7-10 — Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1246.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-7|PAPER Mon-2-9-7 — Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2296.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-6|PAPER Thu-2-8-6 — Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Robust Word-Level Wakeword Verification Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-5|PAPER Thu-1-9-5 — Task-Oriented Dialog Generation with Enhanced Entity Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Task-Oriented Dialog Generation with Enhanced Entity Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-1|PAPER Mon-3-1-1 — Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-5|PAPER Thu-1-9-5 — Task-Oriented Dialog Generation with Enhanced Entity Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Task-Oriented Dialog Generation with Enhanced Entity Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1391.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-1|PAPER Thu-2-2-1 — Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1806.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-4|PAPER Mon-3-5-4 — Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-1|PAPER Mon-3-8-1 — Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-1|PAPER Mon-2-2-1 — Fast and Slow Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Slow Acoustic Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2904.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-9|PAPER Tue-1-5-9 — Bandpass Noise Generation and Augmentation for Unified ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandpass Noise Generation and Augmentation for Unified ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-1|PAPER Wed-1-5-1 — 1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2555.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-5|PAPER Mon-3-5-5 — Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-4|PAPER Wed-SS-3-11-4 — NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-6|PAPER Mon-3-10-6 — Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Focal Loss for Punctuation Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Focal Loss for Punctuation Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1391.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-1|PAPER Thu-2-2-1 — Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1987.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-6|PAPER Wed-1-1-6 — Pitch Declination and Final Lowering in Northeastern Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pitch Declination and Final Lowering in Northeastern Mandarin</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1685.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-10|PAPER Wed-1-10-10 — The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-7|PAPER Mon-3-11-7 — Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-9|PAPER Wed-3-1-9 — Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1633.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-6|PAPER Mon-1-3-6 — Cortical Oscillatory Hierarchy for Natural Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cortical Oscillatory Hierarchy for Natural Sentence Processing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1700.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-3|PAPER Wed-2-12-3 — Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1820.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-6|PAPER Thu-2-4-6 — Dimensional Emotion Prediction Based on Interactive Context in Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensional Emotion Prediction Based on Interactive Context in Conversation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-10|PAPER Mon-2-10-10 — Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-8|PAPER Thu-2-9-8 — Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2143.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-3|PAPER Thu-2-11-3 — HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2427.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-7|PAPER Tue-1-7-7 — Distant Supervision for Polyphone Disambiguation in Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distant Supervision for Polyphone Disambiguation in Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-8|PAPER Tue-1-1-8 — Self-Supervised Representations Improve End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Representations Improve End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-1|PAPER Thu-3-5-1 — Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-8|PAPER Wed-3-1-8 — Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2427.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-7|PAPER Tue-1-7-7 — Distant Supervision for Polyphone Disambiguation in Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distant Supervision for Polyphone Disambiguation in Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2320.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-2|PAPER Mon-1-9-2 — Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-3|PAPER Mon-1-9-3 — Multi-Modal Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Attention for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1810.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-6|PAPER Mon-3-2-6 — Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2595.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-3|PAPER Wed-1-10-3 — The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-1|PAPER Mon-3-8-1 — Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-9|PAPER Mon-3-2-9 — Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2440.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-10|PAPER Mon-3-1-10 — Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-5|PAPER Tue-1-8-5 — Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1294.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-3|PAPER Wed-1-8-3 — Adversarial Audio: A New Information Hiding Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Audio: A New Information Hiding Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2427.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-7|PAPER Tue-1-7-7 — Distant Supervision for Polyphone Disambiguation in Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distant Supervision for Polyphone Disambiguation in Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-4|PAPER Mon-1-3-4 — Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1869.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-1|PAPER Mon-2-1-1 — Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2453.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-7|PAPER Mon-2-4-7 — //Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">//Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2234.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-8|PAPER Thu-1-10-8 — Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2479.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-9|PAPER Thu-1-10-9 — Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-9|PAPER Thu-3-11-9 — U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-10|PAPER Mon-2-10-10 — Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-8|PAPER Thu-2-9-8 — Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-1|PAPER Mon-2-7-1 — Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1678.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-2|PAPER Wed-1-10-2 — The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2350.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-8|PAPER Mon-2-4-8 — Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-2|PAPER Wed-2-4-2 — On Synthesis for Supervised Monaural Speech Separation in Time Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Synthesis for Supervised Monaural Speech Separation in Time Domain</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2205.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-6|PAPER Wed-2-4-6 — Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-9|PAPER Mon-2-10-9 — Text-Independent Speaker Verification with Dual Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Text-Independent Speaker Verification with Dual Attention Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1590.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-3|PAPER Wed-3-4-3 — Towards Universal Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Universal Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1218.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-2|PAPER Mon-3-8-2 — Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-10|PAPER Mon-1-1-10 — Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-8|PAPER Wed-1-10-8 — Perception and Production of Mandarin Initial Stops by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception and Production of Mandarin Initial Stops by Native Urdu Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-1|PAPER Wed-3-1-1 — Automatic Scoring at Multi-Granularity for L2 Pronunciation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Scoring at Multi-Granularity for L2 Pronunciation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-5|PAPER Wed-3-1-5 — Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1284.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-2|PAPER Thu-2-9-2 — Joint Detection of Sentence Stress and Phrase Boundary for Prosody]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Detection of Sentence Stress and Phrase Boundary for Prosody</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-5|PAPER Mon-1-2-5 — Differential Beamforming for Uniform Circular Array with Directional Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differential Beamforming for Uniform Circular Array with Directional Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-10|PAPER Thu-3-10-10 — Exploring Transformers for Large-Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Transformers for Large-Scale Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1504.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-2|PAPER Mon-1-8-2 — Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1151.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-3|PAPER Mon-3-4-3 — ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-5|PAPER Wed-2-5-5 — Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-3|PAPER Wed-3-7-3 — Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2440.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-10|PAPER Mon-3-1-10 — Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-5|PAPER Tue-1-8-5 — Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1303.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-4|PAPER Mon-3-4-4 — Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-4|PAPER Wed-1-10-4 — Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2567.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-8|PAPER Tue-1-7-8 — An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2829.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-9|PAPER Thu-SS-1-6-9 — Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-5|PAPER Wed-3-3-5 — Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Based Wakeword-Independent Verification System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Robust Word-Level Wakeword Verification Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1252.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-1|PAPER Wed-SS-2-3-1 — Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2110.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-7|PAPER Wed-2-6-7 — Neural Speech Completion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Completion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-1|PAPER Thu-3-8-1 — Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-7|PAPER Wed-1-7-7 — Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2868.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-8|PAPER Wed-1-7-8 — Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-5|PAPER Wed-SS-2-3-5 — FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-4|PAPER Wed-3-5-4 — Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-9|PAPER Thu-3-2-9 — Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-8|PAPER Wed-SS-1-4-8 — Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2807.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-10|PAPER Mon-1-8-10 — Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-8|PAPER Thu-3-1-8 — Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-12|PAPER Thu-3-7-12 — Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-6|PAPER Wed-3-5-6 — Compact Speaker Embedding: lrx-Vector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compact Speaker Embedding: lrx-Vector</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2872.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-8|PAPER Thu-1-7-8 — Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2757.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-10|PAPER Wed-3-2-10 — All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-6|PAPER Thu-3-3-6 — Detecting Audio Attacks on ASR Systems with Dropout Uncertainty]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Audio Attacks on ASR Systems with Dropout Uncertainty</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2928.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-4|PAPER Thu-3-10-4 — Transformer-Based Long-Context End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer-Based Long-Context End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2687.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-9|PAPER Wed-3-8-9 — GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1068.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-5|PAPER Thu-2-10-5 — Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-2|PAPER Thu-1-4-2 — Dual Attention in Time and Frequency Domain for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Attention in Time and Frequency Domain for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-9|PAPER Wed-1-10-9 — Now You’re Speaking My Language: Visual Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Now You’re Speaking My Language: Visual Language Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1065.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-1|PAPER Wed-3-12-1 — FaceFilter: Audio-Visual Speech Separation Using Still Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FaceFilter: Audio-Visual Speech Separation Using Still Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1113.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-2|PAPER Wed-3-12-2 — Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-2|PAPER Thu-1-10-2 — Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3200.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-10|PAPER Thu-1-10-10 — Attention Wave-U-Net for Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Wave-U-Net for Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-2|PAPER Thu-1-10-2 — Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1459.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-8|PAPER Thu-3-6-8 — Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-5|PAPER Wed-2-12-5 — Multi-Task Learning for Voice Related Recognition Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for Voice Related Recognition Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2398.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-8|PAPER Thu-1-4-8 — Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-9|PAPER Thu-1-11-9 — Attention Forcing for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Forcing for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2893.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-9|PAPER Mon-2-3-9 — Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-6|PAPER Tue-1-9-6 — Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1175.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-8|PAPER Thu-1-5-8 — Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Training for End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-8|PAPER Tue-1-1-8 — Self-Supervised Representations Improve End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Representations Improve End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-1|PAPER Thu-3-5-1 — Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1676.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-4|PAPER Tue-1-3-4 — Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Analysis of Speech Prosody in Dutch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-5|PAPER Mon-3-10-5 — Improved Model for Vocal Folds with a Polyp with Potential Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Model for Vocal Folds with a Polyp with Potential Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1315.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-8|PAPER Tue-1-8-8 — DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2320.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-2|PAPER Mon-1-9-2 — Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2721.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-7|PAPER Wed-SS-1-6-7 — Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-4|PAPER Mon-2-5-4 — Speech Recognition and Multi-Speaker Diarization of Long Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Recognition and Multi-Speaker Diarization of Long Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1431.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-7|PAPER Thu-3-9-7 — Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-1|PAPER Mon-2-5-1 — Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-8|PAPER Mon-3-2-8 — Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-10|PAPER Tue-1-9-10 — How Ordinal Are Your Data?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Ordinal Are Your Data?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-2|PAPER Tue-1-4-2 — Categorization of Whistled Consonants by French Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Categorization of Whistled Consonants by French Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-3|PAPER Tue-1-4-3 — Whistled Vowel Identification by French Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whistled Vowel Identification by French Listeners</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-9|PAPER Tue-1-10-9 — How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1431.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-7|PAPER Thu-3-9-7 — Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-6|PAPER Thu-1-5-6 — Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-11|PAPER Thu-3-7-11 — Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2588.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-5|PAPER Thu-2-11-5 — Speech Enhancement with Stochastic Temporal Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Stochastic Temporal Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2472.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-5|PAPER Thu-2-2-5 — Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-3|PAPER Wed-2-4-3 — Learning Better Speech Representations by Worsening Interference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Better Speech Representations by Worsening Interference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3200.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-10|PAPER Thu-1-10-10 — Attention Wave-U-Net for Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Wave-U-Net for Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-9|PAPER Thu-2-10-9 — Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-2|PAPER Mon-3-9-2 — iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-1|PAPER Wed-1-3-1 — Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-9|PAPER Wed-1-11-9 — Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-7|PAPER Thu-1-1-7 — Reverberation Modeling for Source-Filter-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverberation Modeling for Source-Filter-Based Neural Vocoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-2|PAPER Thu-1-11-2 — Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-8|PAPER Wed-3-5-8 — Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2538.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-5|PAPER Thu-1-7-5 — Angular Margin Centroid Loss for Text-Independent Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Angular Margin Centroid Loss for Text-Independent Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-6|PAPER Wed-3-2-6 — Identify Speakers in Cocktail Parties with End-to-End Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identify Speakers in Cocktail Parties with End-to-End Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2117.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-3|PAPER Wed-3-12-3 — Fusion Architectures for Word-Based Audiovisual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Architectures for Word-Based Audiovisual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Prosody Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-9|PAPER Wed-1-8-9 — Evaluating Automatically Generated Phoneme Captions for Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Automatically Generated Phoneme Captions for Images</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-13|PAPER Thu-3-7-13 — Glottal Closure Instants Detection from EGG Signal by Classification Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from EGG Signal by Classification Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-8|PAPER Mon-3-2-8 — Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2234.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-8|PAPER Thu-1-10-8 — Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-9|PAPER Thu-3-11-9 — U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1700.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-3|PAPER Wed-2-12-3 — Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2237.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-4|PAPER Thu-2-2-4 — Speech Emotion Recognition with Discriminative Feature Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition with Discriminative Feature Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-11|PAPER Mon-1-5-11 — Neural Homomorphic Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Homomorphic Vocoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-1|PAPER Thu-1-4-1 — Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-5|PAPER Wed-3-1-5 — Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2276.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-6|PAPER Wed-3-3-6 — Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1578.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-3|PAPER Thu-3-11-3 — Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-9|PAPER Thu-3-5-9 — Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2828.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-6|PAPER Mon-3-1-6 — Multilingual Jointly Trained Acoustic and Written Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Jointly Trained Acoustic and Written Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-5|PAPER Wed-2-9-5 — StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-9|PAPER Wed-3-7-9 — Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1563.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-2|PAPER Mon-3-3-2 — SEANet: A Multi-Modal Speech Enhancement Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SEANet: A Multi-Modal Speech Enhancement Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2731.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-3|PAPER Thu-3-7-3 — Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-3|PAPER Mon-2-2-3 — Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-2|PAPER Thu-3-5-2 — Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-8|PAPER Mon-3-7-8 — Black-Box Adaptation of ASR for Accented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Adaptation of ASR for Accented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-3|PAPER Tue-SS-1-6-3 — X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1461.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-2|PAPER Wed-3-4-2 — Conditional Spoken Digit Generation with StyleGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Spoken Digit Generation with StyleGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-7|PAPER Wed-3-1-7 — Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-4|PAPER Thu-1-8-4 — Universal Adversarial Attacks on Spoken Language Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Attacks on Spoken Language Assessment Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-5|PAPER Thu-1-8-5 — Ensemble Approaches for Uncertainty in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Approaches for Uncertainty in Spoken Language Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2691.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-9|PAPER Wed-2-6-9 — Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1797.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-8|PAPER Mon-2-2-8 — Continual Learning for Multi-Dialect Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continual Learning for Multi-Dialect Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1399.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-1|PAPER Mon-3-5-1 — Singing Synthesis: With a Little Help from my Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Synthesis: With a Little Help from my Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1484.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-3|PAPER Mon-1-2-3 — Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3019.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-8|PAPER Mon-1-4-8 — Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2421.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-7|PAPER Wed-1-11-7 — Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2982.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-8|PAPER Wed-1-11-8 — Single-Channel Speech Enhancement by Subspace Affinity Minimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single-Channel Speech Enhancement by Subspace Affinity Minimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2110.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-7|PAPER Wed-2-6-7 — Neural Speech Completion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Completion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2477.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-11|PAPER Wed-3-10-11 — Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-10|PAPER Mon-3-7-10 — Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1891.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-3|PAPER Mon-2-8-3 — Contrastive Predictive Coding of Audio with an Adversary]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contrastive Predictive Coding of Audio with an Adversary</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-6|PAPER Wed-1-11-6 — Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-10|PAPER Thu-2-11-10 — Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-1|PAPER Thu-1-3-1 — Neural Language Modeling with Implicit Cache Pointers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Language Modeling with Implicit Cache Pointers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2477.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-11|PAPER Wed-3-10-11 — Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1672.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-5|PAPER Thu-1-5-5 — Quantification of Transducer Misalignment in Ultrasound Tongue Imaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantification of Transducer Misalignment in Ultrasound Tongue Imaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-12|PAPER Wed-3-10-12 — GAN-Based Data Generation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAN-Based Data Generation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Federated Approach in Training Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-9|PAPER Mon-1-8-9 — Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-3|PAPER Wed-1-9-3 — Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4002.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-1|PAPER Mon-2-12-1 — Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2293.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-5|PAPER Thu-2-4-5 — Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3167.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-9|PAPER Wed-1-3-9 — Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-7|PAPER Wed-3-10-7 — Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-1|PAPER Mon-1-12-1 — ICE-Talk: An Interface for a Controllable Expressive Talking Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ICE-Talk: An Interface for a Controllable Expressive Talking Machine</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-3|PAPER Wed-3-10-3 — Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2893.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-9|PAPER Mon-2-3-9 — Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2732.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-6|PAPER Mon-2-2-6 — CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3095.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-6|PAPER Wed-1-10-6 — Bilingual Acoustic Voice Variation is Similarly Structured Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bilingual Acoustic Voice Variation is Similarly Structured Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-7|PAPER Thu-3-5-7 — Style Variation as a Vantage Point for Code-Switching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Variation as a Vantage Point for Code-Switching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2935.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-6|PAPER Thu-2-11-6 — Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2450.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-7|PAPER Mon-1-3-7 — Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1068.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-5|PAPER Thu-2-10-5 — Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-10|PAPER Wed-2-2-10 — Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-6|PAPER Mon-3-10-6 — Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1766.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-3|PAPER Thu-1-8-3 — Targeted Content Feedback in Spoken Language Learning and Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Targeted Content Feedback in Spoken Language Learning and Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1195.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-6|PAPER Wed-2-6-6 — Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2293.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-5|PAPER Thu-2-4-5 — Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1090.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-2|PAPER Thu-SS-2-5-2 — Extrapolating False Alarm Rates in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extrapolating False Alarm Rates in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-2|PAPER Thu-1-7-2 — On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-7|PAPER Wed-3-1-7 — Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1547.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-4|PAPER Tue-1-7-4 — Enhancing Sequence-to-Sequence Text-to-Speech with Morphology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Sequence-to-Sequence Text-to-Speech with Morphology</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1630.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-4|PAPER Wed-3-4-4 — Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-6|PAPER Wed-1-3-6 — CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-6|PAPER Wed-1-11-6 — Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-10|PAPER Thu-2-11-10 — Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1630.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-4|PAPER Wed-3-4-4 — Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2650.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-7|PAPER Thu-1-7-7 — ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-6|PAPER Mon-3-3-6 — Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-1|PAPER Thu-2-6-1 — Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-9|PAPER Wed-2-1-9 — Towards Automatic Assessment of Voice Disorders: A Clinical Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Automatic Assessment of Voice Disorders: A Clinical Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1339.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-2|PAPER Tue-1-9-2 — Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|^<div class="cpauthorindexpersoncardpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-10|PAPER Mon-3-4-10 — Deep Learning Based Open Set Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Open Set Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-7|PAPER Thu-3-8-7 — Contemporary Polish Language Model (Version 2) Using Big Data and Sub-Word Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contemporary Polish Language Model (Version 2) Using Big Data and Sub-Word Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-1|PAPER Mon-2-2-1 — Fast and Slow Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Slow Acoustic Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2904.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-9|PAPER Tue-1-5-9 — Bandpass Noise Generation and Augmentation for Unified ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandpass Noise Generation and Augmentation for Unified ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-1|PAPER Wed-1-5-1 — 1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-11|PAPER Mon-1-5-11 — Neural Homomorphic Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Homomorphic Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-2|PAPER Thu-2-4-2 — An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2722.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-1|PAPER Thu-2-8-1 — State Sequence Pooling Training of Acoustic Models for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State Sequence Pooling Training of Acoustic Models for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1501.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-5|PAPER Tue-1-2-5 — Sum-Product Networks for Robust Automatic Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sum-Product Networks for Robust Automatic Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1551.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-7|PAPER Wed-2-5-7 — A Deep Learning-Based Kalman Filter for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning-Based Kalman Filter for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-6|PAPER Mon-1-9-6 — Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1552.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-2|PAPER Wed-SS-1-4-2 — Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-6|PAPER Wed-3-10-6 — Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-10|PAPER Mon-1-4-10 — Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2694.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-5|PAPER Mon-2-1-5 — Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2312.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-2|PAPER Wed-2-6-2 — Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-1|PAPER Tue-1-7-1 — g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-9|PAPER Wed-3-5-9 — Neural Discriminant Analysis for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Discriminant Analysis for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1341.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-8|PAPER Thu-1-9-8 — Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-4|PAPER Wed-1-10-4 — Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-9|PAPER Tue-1-10-9 — How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2587.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-5|PAPER Wed-SS-1-6-5 — Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2103.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-6|PAPER Mon-1-5-6 — What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-2|PAPER Tue-1-1-2 — Efficient Wait-k Models for Simultaneous Machine Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Wait-k Models for Simultaneous Machine Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2103.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-6|PAPER Mon-1-5-6 — What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-2|PAPER Mon-1-12-2 — Kaldi-Web: An Installation-Free, On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kaldi-Web: An Installation-Free, On-Device Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-8|PAPER Thu-2-4-8 — Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1461.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-2|PAPER Wed-3-4-2 — Conditional Spoken Digit Generation with StyleGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Spoken Digit Generation with StyleGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2734.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-4|PAPER Thu-3-3-4 — Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-1|PAPER Wed-2-6-1 — Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-2|PAPER Thu-3-7-2 — Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1949.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-8|PAPER Tue-1-3-8 — Deep F-Measure Maximization for End-to-End Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep F-Measure Maximization for End-to-End Speech Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-6|PAPER Wed-3-2-6 — Identify Speakers in Cocktail Parties with End-to-End Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identify Speakers in Cocktail Parties with End-to-End Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1590.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-3|PAPER Wed-3-4-3 — Towards Universal Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Universal Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1463.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-6|PAPER Thu-1-1-6 — An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1652.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-5|PAPER Mon-1-3-5 — Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2375.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-5|PAPER Mon-2-4-5 — The Phonology and Phonetics of Kaifeng Mandarin Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Phonology and Phonetics of Kaifeng Mandarin Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2555.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-5|PAPER Mon-3-5-5 — Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-4|PAPER Wed-SS-3-11-4 — NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-4|PAPER Thu-SS-2-5-4 — Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-2|PAPER Mon-2-4-2 — Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-3|PAPER Mon-3-11-3 — Multimodal Target Speech Separation with Voice and Face References]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Target Speech Separation with Voice and Face References</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1484.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-3|PAPER Mon-1-2-3 — Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-4|PAPER Wed-SS-3-11-4 — NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-1|PAPER Mon-2-7-1 — Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-10|PAPER Thu-3-10-10 — Exploring Transformers for Large-Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Transformers for Large-Scale Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-7|PAPER Wed-2-2-7 — Multi-Scale Convolution for Robust Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Convolution for Robust Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-1|PAPER Tue-1-1-1 — A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1704.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-3|PAPER Thu-3-10-3 — Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-6|PAPER Mon-3-10-6 — Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1570.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-1|PAPER Thu-2-4-1 — SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-3|PAPER Thu-3-10-3 — Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-2|PAPER Thu-2-6-2 — Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1416.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-4|PAPER Mon-2-7-4 — TTS Skins: Speaker Conversion via ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TTS Skins: Speaker Conversion via ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-7|PAPER Mon-2-7-7 — Unsupervised Cross-Domain Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Cross-Domain Singing Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Aazami, Ashkan|AUTHOR Ashkan Aazami]]|
|[[Abad, Alberto|AUTHOR Alberto Abad]]|
|[[Abate, Solomon Teferra|AUTHOR Solomon Teferra Abate]]|
|[[Abavisani, Ali|AUTHOR Ali Abavisani]]|
|[[Abdelfattah, Mohamed S.|AUTHOR Mohamed S. Abdelfattah]]|
|[[Abdel-hamid, Ossama|AUTHOR Ossama Abdel-hamid]]|
|[[Abdelwahab, Mohammed|AUTHOR Mohammed Abdelwahab]]|
|[[Abderrazek, Sondes|AUTHOR Sondes Abderrazek]]|
|[[Abdullah, Badr M.|AUTHOR Badr M. Abdullah]]|
|[[Abe, Masanobu|AUTHOR Masanobu Abe]]|
|[[Abel, Andrew|AUTHOR Andrew Abel]]|
|[[Abhayapala, Thushara D.|AUTHOR Thushara D. Abhayapala]]|
|[[Abhyankar, Apurva|AUTHOR Apurva Abhyankar]]|
|[[Abraham, Ajish K.|AUTHOR Ajish K. Abraham]]|
|[[Abraham, Basil|AUTHOR Basil Abraham]]|
|[[Abulimiti, Ayimunishagu|AUTHOR Ayimunishagu Abulimiti]]|
|[[Achari, Rakesh Prasanth|AUTHOR Rakesh Prasanth Achari]]|
|[[Adachi, Seiji|AUTHOR Seiji Adachi]]|
|[[Adda-Decker, Martine|AUTHOR Martine Adda-Decker]]|
|[[Adelani, David Ifeoluwa|AUTHOR David Ifeoluwa Adelani]]|
|[[Adi, Yossi|AUTHOR Yossi Adi]]|
|[[Adiga, Nagaraj|AUTHOR Nagaraj Adiga]]|
|[[Adya, Saurabh|AUTHOR Saurabh Adya]]|
|[[Afouras, Triantafyllos|AUTHOR Triantafyllos Afouras]]|
|[[Afshan, Amber|AUTHOR Amber Afshan]]|
|[[Agarwal, Ayush|AUTHOR Ayush Agarwal]]|
|[[Agarwal, Rishika|AUTHOR Rishika Agarwal]]|
|[[Aggarwal, Arshiya|AUTHOR Arshiya Aggarwal]]|
|[[Agić, Željko|AUTHOR Željko Agić]]|
|[[Agrawal, Ashish Kumar|AUTHOR Ashish Kumar Agrawal]]|
|[[Agrawal, Purvi|AUTHOR Purvi Agrawal]]|
|[[Ahmad, Waquar|AUTHOR Waquar Ahmad]]|
|[[Ahmed, Beena|AUTHOR Beena Ahmed]]|
|[[Ai, Yang|AUTHOR Yang Ai]]|
|[[Aichner, Robert|AUTHOR Robert Aichner]]|
|[[Akagi, Masato|AUTHOR Masato Akagi]]|
|[[Akbarzadeh, Sara|AUTHOR Sara Akbarzadeh]]|
|[[Akimoto, Kosuke|AUTHOR Kosuke Akimoto]]|
|[[Akula, Jayaprakash|AUTHOR Jayaprakash Akula]]|
|[[Alam, Jahangir|AUTHOR Jahangir Alam]]|
|[[AlBadawy, Ehab A.|AUTHOR Ehab A. AlBadawy]]|
|[[Albes, Merlin|AUTHOR Merlin Albes]]|
|[[Aldeneh, Zakaria|AUTHOR Zakaria Aldeneh]]|
|[[Aleksic, Petar|AUTHOR Petar Aleksic]]|
|[[Alexandridis, Anastasios|AUTHOR Anastasios Alexandridis]]|
|[[Algayres, Robin|AUTHOR Robin Algayres]]|
|[[Alhinti, Lubna|AUTHOR Lubna Alhinti]]|
|[[Ali, Ahmed|AUTHOR Ahmed Ali]]|
|[[Alku, Paavo|AUTHOR Paavo Alku]]|
|[[Alumäe, Tanel|AUTHOR Tanel Alumäe]]|
|[[Alwan, Abeer|AUTHOR Abeer Alwan]]|
|[[Amiriparian, Shahin|AUTHOR Shahin Amiriparian]]|
|[[An, Keyu|AUTHOR Keyu An]]|
|[[An, Zifan|AUTHOR Zifan An]]|
|[[Andersen, Asger H.|AUTHOR Asger H. Andersen]]|
|[[Andreeva, Bistra|AUTHOR Bistra Andreeva]]|
|[[Andrés-Ferrer, Jesús|AUTHOR Jesús Andrés-Ferrer]]|
|[[Andrusenko, Andrei|AUTHOR Andrei Andrusenko]]|
|[[Andzhukaev, Tseren|AUTHOR Tseren Andzhukaev]]|
|[[Angelini, Orazio|AUTHOR Orazio Angelini]]|
|[[Angrick, Miguel|AUTHOR Miguel Angrick]]|
|[[Anirudh, Rushil|AUTHOR Rushil Anirudh]]|
|[[Anjos, Ivo|AUTHOR Ivo Anjos]]|
|[[Antipov, Grigory|AUTHOR Grigory Antipov]]|
|[[Apfel, James|AUTHOR James Apfel]]|
|[[Arai, Kenichi|AUTHOR Kenichi Arai]]|
|[[Arai, Takayuki|AUTHOR Takayuki Arai]]|
|[[Araki, Shoko|AUTHOR Shoko Araki]]|
|[[Arias-Vergara, Tomás|AUTHOR Tomás Arias-Vergara]]|
|[[Ariki, Yasuo|AUTHOR Yasuo Ariki]]|
|[[Aronowitz, Hagai|AUTHOR Hagai Aronowitz]]|
|[[Arora, Ashish|AUTHOR Ashish Arora]]|
|[[Arronte Alvarez, Aitor|AUTHOR Aitor Arronte Alvarez]]|
|[[Arsikere, Harish|AUTHOR Harish Arsikere]]|
|[[Ashihara, Takanori|AUTHOR Takanori Ashihara]]|
|[[Ashishkumar, G.|AUTHOR G. Ashishkumar]]|
|[[Asim, Muhammad|AUTHOR Muhammad Asim]]|
|[[Aso, Masashi|AUTHOR Masashi Aso]]|
|[[Atamna, Asma|AUTHOR Asma Atamna]]|
|[[Atchayaram, Nalini|AUTHOR Nalini Atchayaram]]|
|[[Audhkhasi, Kartik|AUTHOR Kartik Audhkhasi]]|
|[[Augustyniak, Łukasz|AUTHOR Łukasz Augustyniak]]|
|[[Austin, Sara G.|AUTHOR Sara G. Austin]]|
|[[Avdeeva, Anastasia|AUTHOR Anastasia Avdeeva]]|
|[[Avgustinova, Tania|AUTHOR Tania Avgustinova]]|
|[[Avidov, Gilad|AUTHOR Gilad Avidov]]|
|[[Awasthi, Abhijeet|AUTHOR Abhijeet Awasthi]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Babel, Molly|AUTHOR Molly Babel]]|
|[[Babu, Ben P.|AUTHOR Ben P. Babu]]|
|[[Baby, Deepak|AUTHOR Deepak Baby]]|
|[[Bachoud-Lévi, Anne-Catherine|AUTHOR Anne-Catherine Bachoud-Lévi]]|
|[[Bäckström, Tom|AUTHOR Tom Bäckström]]|
|[[Badaskar, Sameer|AUTHOR Sameer Badaskar]]|
|[[Bae, Hanbin|AUTHOR Hanbin Bae]]|
|[[Bae, Jae-Sung|AUTHOR Jae-Sung Bae]]|
|[[Bagi, Anita|AUTHOR Anita Bagi]]|
|[[Bagnou, Jennifer Hamet|AUTHOR Jennifer Hamet Bagnou]]|
|[[Bai, Xue|AUTHOR Xue Bai]]|
|[[Bai, Ye|AUTHOR Ye Bai]]|
|[[Bai, Yu|AUTHOR Yu Bai]]|
|[[Baird, Alice|AUTHOR Alice Baird]]|
|[[Balagopalan, Aparna|AUTHOR Aparna Balagopalan]]|
|[[Bando, Yoshiaki|AUTHOR Yoshiaki Bando]]|
|[[Bandon, John|AUTHOR John Bandon]]|
|[[Banno, Hideki|AUTHOR Hideki Banno]]|
|[[Bansal, Shubham|AUTHOR Shubham Bansal]]|
|[[Bao, Changchun|AUTHOR Changchun Bao]]|
|[[Bao, Yuanyuan|AUTHOR Yuanyuan Bao]]|
|[[Bapat, Sandip Shriram|AUTHOR Sandip Shriram Bapat]]|
|[[Baquero-Arnal, Pau|AUTHOR Pau Baquero-Arnal]]|
|[[Barbera, David S.|AUTHOR David S. Barbera]]|
|[[Barbosa, Plinio A.|AUTHOR Plinio A. Barbosa]]|
|[[Barche, Purva|AUTHOR Purva Barche]]|
|[[Barker, Jon|AUTHOR Jon Barker]]|
|[[Barnard, Etienne|AUTHOR Etienne Barnard]]|
|[[Barra-Chicote, Roberto|AUTHOR Roberto Barra-Chicote]]|
|[[Barth, Volker|AUTHOR Volker Barth]]|
|[[Bartkova, Katarina|AUTHOR Katarina Bartkova]]|
|[[Batliner, Anton|AUTHOR Anton Batliner]]|
|[[Batricevic, Uros|AUTHOR Uros Batricevic]]|
|[[Baumeister, Harald|AUTHOR Harald Baumeister]]|
|[[Bear, Helen L.|AUTHOR Helen L. Bear]]|
|[[Beaufays, Françoise|AUTHOR Françoise Beaufays]]|
|[[Beck, Eugen|AUTHOR Eugen Beck]]|
|[[Beckmann, Pierre|AUTHOR Pierre Beckmann]]|
|[[Bederna, Felicitas|AUTHOR Felicitas Bederna]]|
|[[Beit-On, Hanan|AUTHOR Hanan Beit-On]]|
|[[Bekal, Dhanush|AUTHOR Dhanush Bekal]]|
|[[Beke, András|AUTHOR András Beke]]|
|[[Bell, Peter|AUTHOR Peter Bell]]|
|[[Bellet, Aurélien|AUTHOR Aurélien Bellet]]|
|[[Belur, Yamini|AUTHOR Yamini Belur]]|
|[[Benetos, Emmanouil|AUTHOR Emmanouil Benetos]]|
|[[Bergler, Christian|AUTHOR Christian Bergler]]|
|[[Berisha, Visar|AUTHOR Visar Berisha]]|
|[[Bernard, Mathieu|AUTHOR Mathieu Bernard]]|
|[[Berthouze, Nadia|AUTHOR Nadia Berthouze]]|
|[[Besacier, Laurent|AUTHOR Laurent Besacier]]|
|[[Best, Catherine T.|AUTHOR Catherine T. Best]]|
|[[Beyrami, Ebrahim|AUTHOR Ebrahim Beyrami]]|
|[[Bharadwaj, Samarth|AUTHOR Samarth Bharadwaj]]|
|[[Bhat, Gautam Shreedhar|AUTHOR Gautam Shreedhar Bhat]]|
|[[Bhati, Saurabhchand|AUTHOR Saurabhchand Bhati]]|
|[[Bhatnagar, Aadyot|AUTHOR Aadyot Bhatnagar]]|
|[[Bhatnagar, Varad|AUTHOR Varad Bhatnagar]]|
|[[Bhattacharya, Sourav|AUTHOR Sourav Bhattacharya]]|
|[[Bian, Yuchen|AUTHOR Yuchen Bian]]|
|[[Biemann, Chris|AUTHOR Chris Biemann]]|
|[[Billinghurst, Mark|AUTHOR Mark Billinghurst]]|
|[[Birkholz, Peter|AUTHOR Peter Birkholz]]|
|[[Bitzer, Jörg|AUTHOR Jörg Bitzer]]|
|[[Black, Alan W.|AUTHOR Alan W. Black]]|
|[[Blackburn, Daniel|AUTHOR Daniel Blackburn]]|
|[[Blood, Ian|AUTHOR Ian Blood]]|
|[[Bluche, Théodore|AUTHOR Théodore Bluche]]|
|[[Bocklet, Tobias|AUTHOR Tobias Bocklet]]|
|[[Bodapati, Sravan|AUTHOR Sravan Bodapati]]|
|[[Boeddeker, Christoph|AUTHOR Christoph Boeddeker]]|
|[[Bögel, Tina|AUTHOR Tina Bögel]]|
|[[Bollepalli, Bajibabu|AUTHOR Bajibabu Bollepalli]]|
|[[Bonastre, Jean-François|AUTHOR Jean-François Bonastre]]|
|[[Bonato, Paolo|AUTHOR Paolo Bonato]]|
|[[Bonnín, Clara|AUTHOR Clara Bonnín]]|
|[[Borgholt, Lasse|AUTHOR Lasse Borgholt]]|
|[[Botelho, Catarina|AUTHOR Catarina Botelho]]|
|[[Bou-Balust, Elisenda|AUTHOR Elisenda Bou-Balust]]|
|[[Bougares, Fethi|AUTHOR Fethi Bougares]]|
|[[Bourlard, Hervé|AUTHOR Hervé Bourlard]]|
|[[Bousbib, Ruben|AUTHOR Ruben Bousbib]]|
|[[Boves, Lou|AUTHOR Lou Boves]]|
|[[Bozorg, Narjes|AUTHOR Narjes Bozorg]]|
|[[Braun, Rudolf|AUTHOR Rudolf Braun]]|
|[[Braun, Sebastian|AUTHOR Sebastian Braun]]|
|[[Bredin, Hervé|AUTHOR Hervé Bredin]]|
|[[Brudno, Michael|AUTHOR Michael Brudno]]|
|[[Brueggeman, Avamarie|AUTHOR Avamarie Brueggeman]]|
|[[Bruguier, Antoine|AUTHOR Antoine Bruguier]]|
|[[Bryan, Nicholas J.|AUTHOR Nicholas J. Bryan]]|
|[[Bu, Hui|AUTHOR Hui Bu]]|
|[[Bui, Bach|AUTHOR Bach Bui]]|
|[[Bulut, Ahmet E.|AUTHOR Ahmet E. Bulut]]|
|[[Burget, Lukáš|AUTHOR Lukáš Burget]]|
|[[Burnham, Denis|AUTHOR Denis Burnham]]|
|[[Bushaev, Vitalii|AUTHOR Vitalii Bushaev]]|
|[[Busso, Carlos|AUTHOR Carlos Busso]]|
|[[Butt, Miriam|AUTHOR Miriam Butt]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Cadée, Tobias|AUTHOR Tobias Cadée]]|
|[[Cahill, Áine|AUTHOR Áine Cahill]]|
|[[Cahyawijaya, Samuel|AUTHOR Samuel Cahyawijaya]]|
|[[Cai, Ruichu|AUTHOR Ruichu Cai]]|
|[[Cai, Siqi|AUTHOR Siqi Cai]]|
|[[Cai, Xingyu|AUTHOR Xingyu Cai]]|
|[[Cai, Yunliang|AUTHOR Yunliang Cai]]|
|[[Cai, Yunqi|AUTHOR Yunqi Cai]]|
|[[Cai, Zexin|AUTHOR Zexin Cai]]|
|[[Calamia, Paul|AUTHOR Paul Calamia]]|
|[[Calvo, Jose R.|AUTHOR Jose R. Calvo]]|
|[[Campbell, William M.|AUTHOR William M. Campbell]]|
|[[Cao, Beiming|AUTHOR Beiming Cao]]|
|[[Cao, Houwei|AUTHOR Houwei Cao]]|
|[[Cao, Jin|AUTHOR Jin Cao]]|
|[[Cao, Ruisheng|AUTHOR Ruisheng Cao]]|
|[[Cao, Xuan-Nga|AUTHOR Xuan-Nga Cao]]|
|[[Cao, Yuewen|AUTHOR Yuewen Cao]]|
|[[Cao, Yuexin|AUTHOR Yuexin Cao]]|
|[[Caraty, Marie-José|AUTHOR Marie-José Caraty]]|
|[[Carbajal, Guillaume|AUTHOR Guillaume Carbajal]]|
|[[Cardoso, Rita|AUTHOR Rita Cardoso]]|
|[[Carlozzi, Noelle|AUTHOR Noelle Carlozzi]]|
|[[Carmiel, Yishay|AUTHOR Yishay Carmiel]]|
|[[Caseiro, Diamantino|AUTHOR Diamantino Caseiro]]|
|[[Castellani, Dan|AUTHOR Dan Castellani]]|
|[[Caubrière, Antoine|AUTHOR Antoine Caubrière]]|
|[[Cavaco, Sofia|AUTHOR Sofia Cavaco]]|
|[[Cernak, Milos|AUTHOR Milos Cernak]]|
|[[Cettolo, Mauro|AUTHOR Mauro Cettolo]]|
|[[Chai, Li|AUTHOR Li Chai]]|
|[[Chakrabarti, Chaitali|AUTHOR Chaitali Chakrabarti]]|
|[[Chalamandaris, Aimilios|AUTHOR Aimilios Chalamandaris]]|
|[[Champagne, Benoit|AUTHOR Benoit Champagne]]|
|[[Chande, Naglakshmi|AUTHOR Naglakshmi Chande]]|
|[[Chandra Shekar, Meena|AUTHOR Meena Chandra Shekar]]|
|[[Chandu, Khyathi Raghavi|AUTHOR Khyathi Raghavi Chandu]]|
|[[Chang, Chun-Chieh|AUTHOR Chun-Chieh Chang]]|
|[[Chang, Chun-Min|AUTHOR Chun-Min Chang]]|
|[[Chang, Hsiu-Jui|AUTHOR Hsiu-Jui Chang]]|
|[[Chang, Joon-Hyuk|AUTHOR Joon-Hyuk Chang]]|
|[[Chang, Shuo-Yiin|AUTHOR Shuo-Yiin Chang]]|
|[[Chang, Xuankai|AUTHOR Xuankai Chang]]|
|[[Chang, Yueh-chin|AUTHOR Yueh-chin Chang]]|
|[[Chao, Fu-An|AUTHOR Fu-An Chao]]|
|[[Charon, Nicolas|AUTHOR Nicolas Charon]]|
|[[Chemmengath, Saneem|AUTHOR Saneem Chemmengath]]|
|[[Chen, Aoju|AUTHOR Aoju Chen]]|
|[[Chen, Berlin|AUTHOR Berlin Chen]]|
|[[Chen, Binbin|AUTHOR Binbin Chen]]|
|[[Chen, Boxing|AUTHOR Boxing Chen]]|
|[[Chen, Chen-Yu|AUTHOR Chen-Yu Chen]]|
|[[Chen, Fei|AUTHOR Fei Chen]]|
|[[Chen, Fuling|AUTHOR Fuling Chen]]|
|[[Chen, Hangting|AUTHOR Hangting Chen]]|
|[[Chen, Hongsheng|AUTHOR Hongsheng Chen]]|
|[[Chen, Huili|AUTHOR Huili Chen]]|
|[[Chen, Jian|AUTHOR Jian Chen]]|
|[[Chen, Jing|AUTHOR Jing Chen]]|
|[[Chen, Jingjing|AUTHOR Jingjing Chen]]|
|[[Chen, Kai|AUTHOR Kai Chen]]|
|[[Chen, Kuan|AUTHOR Kuan Chen]]|
|[[Chen, Kuan-Yu|AUTHOR Kuan-Yu Chen]]|
|[[Chen, Lianwu|AUTHOR Lianwu Chen]]|
|[[Chen, Ling-Hui|AUTHOR Ling-Hui Chen]]|
|[[Chen, Lu|AUTHOR Lu Chen]]|
|[[Chen, Luoxin|AUTHOR Luoxin Chen]]|
|[[Chen, Minchuan|AUTHOR Minchuan Chen]]|
|[[Chen, Ming|AUTHOR Ming Chen]]|
|[[Chen, Mingjian|AUTHOR Mingjian Chen]]|
|[[Chen, Mingjie|AUTHOR Mingjie Chen]]|
|[[Chen, Nancy F.|AUTHOR Nancy F. Chen]]|
|[[Chen, Nanxin|AUTHOR Nanxin Chen]]|
|[[Chen, Rui|AUTHOR Rui Chen]]|
|[[Chen, Si|AUTHOR Si Chen]]|
|[[Chen, Siyuan|AUTHOR Siyuan Chen]]|
|[[Chen, Tianlong|AUTHOR Tianlong Chen]]|
|[[Chen, Wenda|AUTHOR Wenda Chen]]|
|[[Chen, Xianhong|AUTHOR Xianhong Chen]]|
|[[Chen, Xiao|AUTHOR Xiao Chen]]|
|[[Chen, Xi|AUTHOR Xi Chen]]|
|[[Chen, Yang|AUTHOR Yang Chen]]|
|[[Chen, Yangbin|AUTHOR Yangbin Chen]]|
|[[Chen, Yefei|AUTHOR Yefei Chen]]|
|[[Chen, Yen-Hao|AUTHOR Yen-Hao Chen]]|
|[[Chen, Yi-Chen|AUTHOR Yi-Chen Chen]]|
|[[Chen, Ying|AUTHOR Ying Chen]]|
|[[Chen, Yuan-Jui|AUTHOR Yuan-Jui Chen]]|
|[[Chen, Yu-Hua|AUTHOR Yu-Hua Chen]]|
|[[Chen, Yuxiang|AUTHOR Yuxiang Chen]]|
|[[Chen, Zhan|AUTHOR Zhan Chen]]|
|[[Chen, Zhehuai|AUTHOR Zhehuai Chen]]|
|[[Chen, Zhengmao|AUTHOR Zhengmao Chen]]|
|[[Chen, Zhengyang|AUTHOR Zhengyang Chen]]|
|[[Chen, Zhuo|AUTHOR Zhuo Chen]]|
|[[Chen, Zhuxin|AUTHOR Zhuxin Chen]]|
|[[Cheng, Junjie|AUTHOR Junjie Cheng]]|
|[[Cheng, Ning|AUTHOR Ning Cheng]]|
|[[Cheng, Roger|AUTHOR Roger Cheng]]|
|[[Cheng, Rui|AUTHOR Rui Cheng]]|
|[[Cheng, Sitong|AUTHOR Sitong Cheng]]|
|[[Chennupati, Nivedita|AUTHOR Nivedita Chennupati]]|
|[[Cheon, Sung Jun|AUTHOR Sung Jun Cheon]]|
|[[Chermaz, Carol|AUTHOR Carol Chermaz]]|
|[[Chetupalli, Srikanth Raj|AUTHOR Srikanth Raj Chetupalli]]|
|[[Chiao, Alan|AUTHOR Alan Chiao]]|
|[[Chiba, Yuya|AUTHOR Yuya Chiba]]|
|[[Chien, Jen-Tzung|AUTHOR Jen-Tzung Chien]]|
|[[Ching, P.C.|AUTHOR P.C. Ching]]|
|[[Chiu, Chung-Cheng|AUTHOR Chung-Cheng Chiu]]|
|[[Chiu, Waito|AUTHOR Waito Chiu]]|
|[[Chng, Eng Siong|AUTHOR Eng Siong Chng]]|
|[[Cho, Hoon-Young|AUTHOR Hoon-Young Cho]]|
|[[Cho, Jaejin|AUTHOR Jaejin Cho]]|
|[[Cho, Won Ik|AUTHOR Won Ik Cho]]|
|[[Choe, Soyeon|AUTHOR Soyeon Choe]]|
|[[Choi, Byoung Jin|AUTHOR Byoung Jin Choi]]|
|[[Choi, Seong|AUTHOR Seong Choi]]|
|[[Choi, Seungtaek|AUTHOR Seungtaek Choi]]|
|[[Choi, Seungwoo|AUTHOR Seungwoo Choi]]|
|[[Choi, Yeunju|AUTHOR Yeunju Choi]]|
|[[Chong, Dading|AUTHOR Dading Chong]]|
|[[Choo, Kihyun|AUTHOR Kihyun Choo]]|
|[[Chorowski, Jan|AUTHOR Jan Chorowski]]|
|[[Chou, Huang-Cheng|AUTHOR Huang-Cheng Chou]]|
|[[Choudhary, Samridhi|AUTHOR Samridhi Choudhary]]|
|[[Chowdhury, Anurag|AUTHOR Anurag Chowdhury]]|
|[[Chowdhury, Shammur A.|AUTHOR Shammur A. Chowdhury]]|
|[[Christensen, Heidi|AUTHOR Heidi Christensen]]|
|[[Christensen, Mads Græsbøll|AUTHOR Mads Græsbøll Christensen]]|
|[[Chu, Wei|AUTHOR Wei Chu]]|
|[[Chu, Xiangxiang|AUTHOR Xiangxiang Chu]]|
|[[Chuang, Shang-Yi|AUTHOR Shang-Yi Chuang]]|
|[[Chuang, Yung-Sung|AUTHOR Yung-Sung Chuang]]|
|[[Chukharev-Hudilainen, Evgeny|AUTHOR Evgeny Chukharev-Hudilainen]]|
|[[Chung, Joon Son|AUTHOR Joon Son Chung]]|
|[[Chung, Minhwa|AUTHOR Minhwa Chung]]|
|[[Chung, Soo-Whan|AUTHOR Soo-Whan Chung]]|
|[[Chung, Yu-An|AUTHOR Yu-An Chung]]|
|[[Church, Kenneth|AUTHOR Kenneth Church]]|
|[[Ciccarelli, Gregory|AUTHOR Gregory Ciccarelli]]|
|[[Civera, Jorge|AUTHOR Jorge Civera]]|
|[[Clark, Rob|AUTHOR Rob Clark]]|
|[[Clavel, Chloé|AUTHOR Chloé Clavel]]|
|[[Clayton, Jonathan|AUTHOR Jonathan Clayton]]|
|[[Clermont, Frantz|AUTHOR Frantz Clermont]]|
|[[Cohn, Michelle|AUTHOR Michelle Cohn]]|
|[[Coley-Fisher, Henry|AUTHOR Henry Coley-Fisher]]|
|[[Collobert, Ronan|AUTHOR Ronan Collobert]]|
|[[Colonna, Juan G.|AUTHOR Juan G. Colonna]]|
|[[Colotte, Vincent|AUTHOR Vincent Colotte]]|
|[[Cong, Jian|AUTHOR Jian Cong]]|
|[[Cooke, Martin|AUTHOR Martin Cooke]]|
|[[Cooper, Erica|AUTHOR Erica Cooper]]|
|[[Cordes Galbraith, Max|AUTHOR Max Cordes Galbraith]]|
|[[Cornell, Samuele|AUTHOR Samuele Cornell]]|
|[[Cornish, Andrew|AUTHOR Andrew Cornish]]|
|[[Cosentino, Joris|AUTHOR Joris Cosentino]]|
|[[Cottrell, Garrison W.|AUTHOR Garrison W. Cottrell]]|
|[[Cox, Felicity|AUTHOR Felicity Cox]]|
|[[Cozzo, Austin|AUTHOR Austin Cozzo]]|
|[[Crinion, Jenny|AUTHOR Jenny Crinion]]|
|[[Cristia, Alejandrina|AUTHOR Alejandrina Cristia]]|
|[[Cruz Blandón, María Andrea|AUTHOR María Andrea Cruz Blandón]]|
|[[Csapó, Tamás Gábor|AUTHOR Tamás Gábor Csapó]]|
|[[Cucchiarini, Catia|AUTHOR Catia Cucchiarini]]|
|[[Cui, Jia|AUTHOR Jia Cui]]|
|[[Cui, Jie|AUTHOR Jie Cui]]|
|[[Cui, Ping|AUTHOR Ping Cui]]|
|[[Cui, Yang|AUTHOR Yang Cui]]|
|[[Cummins, Nicholas|AUTHOR Nicholas Cummins]]|
|[[Cunningham, Stuart|AUTHOR Stuart Cunningham]]|
|[[Cutler, Ross|AUTHOR Ross Cutler]]|
|[[Cvetkovic, Zoran|AUTHOR Zoran Cvetkovic]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Dai, Li-Rong|AUTHOR Li-Rong Dai]]|
|[[Dai, Wang|AUTHOR Wang Dai]]|
|[[Dai, Yinpei|AUTHOR Yinpei Dai]]|
|[[Dang, Jianwu|AUTHOR Jianwu Dang]]|
|[[Dang, Viet-Trung|AUTHOR Viet-Trung Dang]]|
|[[Dargnat, Mathilde|AUTHOR Mathilde Dargnat]]|
|[[Darvish, Bita|AUTHOR Bita Darvish]]|
|[[Das, Anurag|AUTHOR Anurag Das]]|
|[[Das, Partha Pratim|AUTHOR Partha Pratim Das]]|
|[[Das, Rohan Kumar|AUTHOR Rohan Kumar Das]]|
|[[Das, Sneha|AUTHOR Sneha Das]]|
|[[Dash, Debadatta|AUTHOR Debadatta Dash]]|
|[[Dashtipour, Kia|AUTHOR Kia Dashtipour]]|
|[[Däubener, Sina|AUTHOR Sina Däubener]]|
|[[Davody, Ali|AUTHOR Ali Davody]]|
|[[Deadman, Jack|AUTHOR Jack Deadman]]|
|[[Debita, Grzegorz|AUTHOR Grzegorz Debita]]|
|[[de Chaumont Quitry, Félix|AUTHOR Félix de Chaumont Quitry]]|
|[[Defina, Rebecca|AUTHOR Rebecca Defina]]|
|[[Défossez, Alexandre|AUTHOR Alexandre Défossez]]|
|[[Defraene, Bruno|AUTHOR Bruno Defraene]]|
|[[Degala, Divya|AUTHOR Divya Degala]]|
|[[Dehak, Najim|AUTHOR Najim Dehak]]|
|[[de Korte, Marcel|AUTHOR Marcel de Korte]]|
|[[de la Fuente, Sofia|AUTHOR Sofia de la Fuente]]|
|[[DeLaura, Richard|AUTHOR Richard DeLaura]]|
|[[Delcroix, Marc|AUTHOR Marc Delcroix]]|
|[[Deleforge, Antoine|AUTHOR Antoine Deleforge]]|
|[[del Mar Cordero, Maria|AUTHOR Maria del Mar Cordero]]|
|[[Demirşahin, Işın|AUTHOR Işın Demirşahin]]|
|[[Demuynck, Kris|AUTHOR Kris Demuynck]]|
|[[Deng, Agape|AUTHOR Agape Deng]]|
|[[Deng, Chengyun|AUTHOR Chengyun Deng]]|
|[[Deng, Feng|AUTHOR Feng Deng]]|
|[[Deng, Shuwen|AUTHOR Shuwen Deng]]|
|[[Deng, Yayue|AUTHOR Yayue Deng]]|
|[[Denisov, Pavel|AUTHOR Pavel Denisov]]|
|[[Dentel, Laure|AUTHOR Laure Dentel]]|
|[[Desplanques, Brecht|AUTHOR Brecht Desplanques]]|
|[[Devalraju, Dhanunjaya Varma|AUTHOR Dhanunjaya Varma Devalraju]]|
|[[D’Haese, Zoltán|AUTHOR Zoltán D’Haese]]|
|[[Dhamyal, Hira|AUTHOR Hira Dhamyal]]|
|[[Dheram, Pranav|AUTHOR Pranav Dheram]]|
|[[Dhir, Chandra|AUTHOR Chandra Dhir]]|
|[[Didolkar, Aniket|AUTHOR Aniket Didolkar]]|
|[[Diener, Lorenz|AUTHOR Lorenz Diener]]|
|[[Di Gangi, Mattia A.|AUTHOR Mattia A. Di Gangi]]|
|[[Dighe, Pranay|AUTHOR Pranay Dighe]]|
|[[Dimitriadis, Dimitrios|AUTHOR Dimitrios Dimitriadis]]|
|[[Dinesh, Dileep Aroor|AUTHOR Dileep Aroor Dinesh]]|
|[[Ding, Chenchen|AUTHOR Chenchen Ding]]|
|[[Ding, Fenglin|AUTHOR Fenglin Ding]]|
|[[Ding, Guohong|AUTHOR Guohong Ding]]|
|[[Ding, Hongwei|AUTHOR Hongwei Ding]]|
|[[Ding, Shaojin|AUTHOR Shaojin Ding]]|
|[[Ding, Wenbing|AUTHOR Wenbing Ding]]|
|[[Dinh, Tuan|AUTHOR Tuan Dinh]]|
|[[Dinkel, Heinrich|AUTHOR Heinrich Dinkel]]|
|[[Dissanayake, Vipula|AUTHOR Vipula Dissanayake]]|
|[[Ditter, David|AUTHOR David Ditter]]|
|[[Do, Quoc Truong|AUTHOR Quoc Truong Do]]|
|[[Do, Quynh|AUTHOR Quynh Do]]|
|[[Dobson, Richard|AUTHOR Richard Dobson]]|
|[[Doclo, Simon|AUTHOR Simon Doclo]]|
|[[Dognin, Charles|AUTHOR Charles Dognin]]|
|[[Doh, Kyoungtae|AUTHOR Kyoungtae Doh]]|
|[[Dong, Shuyan|AUTHOR Shuyan Dong]]|
|[[Dong, Xuan|AUTHOR Xuan Dong]]|
|[[Dong, Yuanjie|AUTHOR Yuanjie Dong]]|
|[[Dong, Yue|AUTHOR Yue Dong]]|
|[[dos Santos, Eulanda M.|AUTHOR Eulanda M. dos Santos]]|
|[[Dou, Qingyun|AUTHOR Qingyun Dou]]|
|[[Douros, Ioannis K.|AUTHOR Ioannis K. Douros]]|
|[[Dourou, Chrysanthi|AUTHOR Chrysanthi Dourou]]|
|[[Dousti, Mohammad Javad|AUTHOR Mohammad Javad Dousti]]|
|[[Dresvyanskiy, Denis|AUTHOR Denis Dresvyanskiy]]|
|[[Droppo, Jasha|AUTHOR Jasha Droppo]]|
|[[Drude, Lukas|AUTHOR Lukas Drude]]|
|[[Drugman, Thomas|AUTHOR Thomas Drugman]]|
|[[Du, Dan|AUTHOR Dan Du]]|
|[[Du, Jun|AUTHOR Jun Du]]|
|[[Du, Junzhao|AUTHOR Junzhao Du]]|
|[[Du, Yujiao|AUTHOR Yujiao Du]]|
|[[Du, Zhihao|AUTHOR Zhihao Du]]|
|[[Duan, Richeng|AUTHOR Richeng Duan]]|
|[[Dubey, Harishchandra|AUTHOR Harishchandra Dubey]]|
|[[Dudziak, Łukasz|AUTHOR Łukasz Dudziak]]|
|[[Dufour, Richard|AUTHOR Richard Dufour]]|
|[[Dufour, Sophie|AUTHOR Sophie Dufour]]|
|[[Dunbar, Ewan|AUTHOR Ewan Dunbar]]|
|[[Dupoux, Emmanuel|AUTHOR Emmanuel Dupoux]]|
|[[Duroselle, Raphaël|AUTHOR Raphaël Duroselle]]|
|[[Dušek, Ondřej|AUTHOR Ondřej Dušek]]|
|[[Dutoit, Thierry|AUTHOR Thierry Dutoit]]|
|[[Dutta, Anirban|AUTHOR Anirban Dutta]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Edunov, Sergey|AUTHOR Sergey Edunov]]|
|[[Edwards, Erik|AUTHOR Erik Edwards]]|
|[[Effendi, Johanes|AUTHOR Johanes Effendi]]|
|[[Efiong, Joshua|AUTHOR Joshua Efiong]]|
|[[Elavathur Ranganatha, Narayanan|AUTHOR Narayanan Elavathur Ranganatha]]|
|[[Elbayad, Maha|AUTHOR Maha Elbayad]]|
|[[Eldesouki, Mohamed|AUTHOR Mohamed Eldesouki]]|
|[[El Haddad, Kevin|AUTHOR Kevin El Haddad]]|
|[[Ellinas, Nikolaos|AUTHOR Nikolaos Ellinas]]|
|[[El Zarka, Dina|AUTHOR Dina El Zarka]]|
|[[Emanuel, Dotan|AUTHOR Dotan Emanuel]]|
|[[Enyedi, Robert|AUTHOR Robert Enyedi]]|
|[[Epps, Julien|AUTHOR Julien Epps]]|
|[[Erickson, Donna|AUTHOR Donna Erickson]]|
|[[Escalante-B., A.N.|AUTHOR A.N. Escalante-B.]]|
|[[Escott, Alex|AUTHOR Alex Escott]]|
|[[Eskenazi, Maxine|AUTHOR Maxine Eskenazi]]|
|[[Eskimez, Sefik Emre|AUTHOR Sefik Emre Eskimez]]|
|[[Esposito, Anna|AUTHOR Anna Esposito]]|
|[[Espy-Wilson, Carol|AUTHOR Carol Espy-Wilson]]|
|[[Estève, Yannick|AUTHOR Yannick Estève]]|
|[[Evanini, Keelan|AUTHOR Keelan Evanini]]|
|[[Evans, Nicholas|AUTHOR Nicholas Evans]]|
|[[Evermann, Gunnar|AUTHOR Gunnar Evermann]]|
|[[Ewers, Robert|AUTHOR Robert Ewers]]|
|[[Eyre, Benjamin|AUTHOR Benjamin Eyre]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Fabbri, Margherita|AUTHOR Margherita Fabbri]]|
|[[Falavigna, Daniele|AUTHOR Daniele Falavigna]]|
|[[Falkowski-Gilski, Przemyslaw|AUTHOR Przemyslaw Falkowski-Gilski]]|
|[[Fan, Cunhang|AUTHOR Cunhang Fan]]|
|[[Fan, Kai|AUTHOR Kai Fan]]|
|[[Fan, Lu|AUTHOR Lu Fan]]|
|[[Fan, Ruchao|AUTHOR Ruchao Fan]]|
|[[Fan, Weiquan|AUTHOR Weiquan Fan]]|
|[[Fan, Wenzhi|AUTHOR Wenzhi Fan]]|
|[[Fan, Zehao|AUTHOR Zehao Fan]]|
|[[Fang, Qiang|AUTHOR Qiang Fang]]|
|[[Fang, Ruomei|AUTHOR Ruomei Fang]]|
|[[Farris, Brian|AUTHOR Brian Farris]]|
|[[Farrús, Mireia|AUTHOR Mireia Farrús]]|
|[[Farzana, Shahla|AUTHOR Shahla Farzana]]|
|[[Favre, Benoit|AUTHOR Benoit Favre]]|
|[[Federico, Marcello|AUTHOR Marcello Federico]]|
|[[Fedorov, Igor|AUTHOR Igor Fedorov]]|
|[[Fedotov, Dmitrii|AUTHOR Dmitrii Fedotov]]|
|[[Felblinger, Jacques|AUTHOR Jacques Felblinger]]|
|[[Fels, Sidney|AUTHOR Sidney Fels]]|
|[[Feng, Han|AUTHOR Han Feng]]|
|[[Feng, Jinwei|AUTHOR Jinwei Feng]]|
|[[Feng, Junlan|AUTHOR Junlan Feng]]|
|[[Feng, Siyuan|AUTHOR Siyuan Feng]]|
|[[Feng, Xiaoli|AUTHOR Xiaoli Feng]]|
|[[Feng, Yan|AUTHOR Yan Feng]]|
|[[Ferrari, Paul|AUTHOR Paul Ferrari]]|
|[[Ferreira, Joaquim|AUTHOR Joaquim Ferreira]]|
|[[Fingscheidt, Tim|AUTHOR Tim Fingscheidt]]|
|[[Finkelstein, Adam|AUTHOR Adam Finkelstein]]|
|[[Fischer, Asja|AUTHOR Asja Fischer]]|
|[[Fleming, Victoria|AUTHOR Victoria Fleming]]|
|[[Fluyt, Kristoff|AUTHOR Kristoff Fluyt]]|
|[[Foglianti, Lorenzo|AUTHOR Lorenzo Foglianti]]|
|[[Fong, Jason|AUTHOR Jason Fong]]|
|[[Fontaine, Mathieu|AUTHOR Mathieu Fontaine]]|
|[[Frank, Ariel|AUTHOR Ariel Frank]]|
|[[Fredouille, Corinne|AUTHOR Corinne Fredouille]]|
|[[Fristed, Emil|AUTHOR Emil Fristed]]|
|[[Fritsch, Julian|AUTHOR Julian Fritsch]]|
|[[Fromm, Davida|AUTHOR Davida Fromm]]|
|[[Fu, Gengshen|AUTHOR Gengshen Fu]]|
|[[Fu, Kaiqi|AUTHOR Kaiqi Fu]]|
|[[Fu, Qiang|AUTHOR Qiang Fu]]|
|[[Fu, Ruibo|AUTHOR Ruibo Fu]]|
|[[Fu, Szu-Wei|AUTHOR Szu-Wei Fu]]|
|[[Fu, Yihui|AUTHOR Yihui Fu]]|
|[[Fu, Zhen|AUTHOR Zhen Fu]]|
|[[Fuchs, Guillaume|AUTHOR Guillaume Fuchs]]|
|[[Fuegen, Christian|AUTHOR Christian Fuegen]]|
|[[Fuhrman, Robert A.|AUTHOR Robert A. Fuhrman]]|
|[[Fujioka, Takuya|AUTHOR Takuya Fujioka]]|
|[[Fujita, Yusuke|AUTHOR Yusuke Fujita]]|
|[[Fujita, Yuya|AUTHOR Yuya Fujita]]|
|[[Fukuda, Takashi|AUTHOR Takashi Fukuda]]|
|[[Fukui, Kazuhiro|AUTHOR Kazuhiro Fukui]]|
|[[Fung, Pascale|AUTHOR Pascale Fung]]|
|[[Fuscone, Simone|AUTHOR Simone Fuscone]]|
|[[Futami, Hayato|AUTHOR Hayato Futami]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[G., Abhijith|AUTHOR Abhijith G.]]|
|[[Gaddam, Navaneetha|AUTHOR Navaneetha Gaddam]]|
|[[Gaido, Marco|AUTHOR Marco Gaido]]|
|[[Galajit, Kasorn|AUTHOR Kasorn Galajit]]|
|[[Gale, William|AUTHOR William Gale]]|
|[[Gales, Mark J.F.|AUTHOR Mark J.F. Gales]]|
|[[Gan, Yiming|AUTHOR Yiming Gan]]|
|[[Ganapathy, Sriram|AUTHOR Sriram Ganapathy]]|
|[[Gao, Guanglai|AUTHOR Guanglai Gao]]|
|[[Gao, Hongcan|AUTHOR Hongcan Gao]]|
|[[Gao, Jiameng|AUTHOR Jiameng Gao]]|
|[[Gao, Jianqing|AUTHOR Jianqing Gao]]|
|[[Gao, Jie|AUTHOR Jie Gao]]|
|[[Gao, Shengzhou|AUTHOR Shengzhou Gao]]|
|[[Gao, Yang|AUTHOR Yang Gao]]|
|[[Gao, Yi|AUTHOR Yi Gao]]|
|[[Gao, Yingbo|AUTHOR Yingbo Gao]]|
|[[Gao, Yingming|AUTHOR Yingming Gao]]|
|[[Gao, Yixin|AUTHOR Yixin Gao]]|
|[[Gao, Yuan|AUTHOR Yuan Gao]]|
|[[Gao, Zhifu|AUTHOR Zhifu Gao]]|
|[[García, Basi|AUTHOR Basi García]]|
|[[García Lecumberri, María Luisa|AUTHOR María Luisa García Lecumberri]]|
|[[Garcia-Perera, Leibny Paola|AUTHOR Leibny Paola Garcia-Perera]]|
|[[Garg, Abhinav|AUTHOR Abhinav Garg]]|
|[[Garg, Vineet|AUTHOR Vineet Garg]]|
|[[Garimella, Sri|AUTHOR Sri Garimella]]|
|[[Gaspers, Judith|AUTHOR Judith Gaspers]]|
|[[Gatto, Bernardo B.|AUTHOR Bernardo B. Gatto]]|
|[[Gaur, Neeraj|AUTHOR Neeraj Gaur]]|
|[[Gaur, Yashesh|AUTHOR Yashesh Gaur]]|
|[[Ge, Meng|AUTHOR Meng Ge]]|
|[[Ge, Niyu|AUTHOR Niyu Ge]]|
|[[Gehrke, Johannes|AUTHOR Johannes Gehrke]]|
|[[Gemello, Roberto|AUTHOR Roberto Gemello]]|
|[[Geng, Mengzhe|AUTHOR Mengzhe Geng]]|
|[[Gengembre, Nicolas|AUTHOR Nicolas Gengembre]]|
|[[Georges, Munir|AUTHOR Munir Georges]]|
|[[Gerazov, Branislav|AUTHOR Branislav Gerazov]]|
|[[Gerkmann, Timo|AUTHOR Timo Gerkmann]]|
|[[Gessinger, Iona|AUTHOR Iona Gessinger]]|
|[[Gevrek, Özgür Bora|AUTHOR Özgür Bora Gevrek]]|
|[[Ghahramani, Pegah|AUTHOR Pegah Ghahramani]]|
|[[Ghasemzadeh, Mohammad|AUTHOR Mohammad Ghasemzadeh]]|
|[[Ghio, Alain|AUTHOR Alain Ghio]]|
|[[Ghorbani, Shahram|AUTHOR Shahram Ghorbani]]|
|[[Ghosh, Prasanta Kumar|AUTHOR Prasanta Kumar Ghosh]]|
|[[Ghosh, Sreyan|AUTHOR Sreyan Ghosh]]|
|[[Ghosh Chowdhury, Arijit|AUTHOR Arijit Ghosh Chowdhury]]|
|[[Giavazzi, Maria|AUTHOR Maria Giavazzi]]|
|[[Gill, Marie-Philippe|AUTHOR Marie-Philippe Gill]]|
|[[Giménez, Adrià|AUTHOR Adrià Giménez]]|
|[[Gimeno, Pablo|AUTHOR Pablo Gimeno]]|
|[[Ginsburg, Boris|AUTHOR Boris Ginsburg]]|
|[[Giri, Ritwik|AUTHOR Ritwik Giri]]|
|[[Girin, Laurent|AUTHOR Laurent Girin]]|
|[[Girshick, Ross|AUTHOR Ross Girshick]]|
|[[Gisselbrecht, Thibault|AUTHOR Thibault Gisselbrecht]]|
|[[Glasman, Alex|AUTHOR Alex Glasman]]|
|[[Glass, James|AUTHOR James Glass]]|
|[[Glembek, Ondřej|AUTHOR Ondřej Glembek]]|
|[[Gmyr, Robert|AUTHOR Robert Gmyr]]|
|[[Gogate, Mandar|AUTHOR Mandar Gogate]]|
|[[Golokolenko, Oleg|AUTHOR Oleg Golokolenko]]|
|[[Gómez, Emilia|AUTHOR Emilia Gómez]]|
|[[Gomez, Gloria Montoya|AUTHOR Gloria Montoya Gomez]]|
|[[Gong, Xinyu|AUTHOR Xinyu Gong]]|
|[[Gong, Yifan|AUTHOR Yifan Gong]]|
|[[González Hautamäki, Rosa|AUTHOR Rosa González Hautamäki]]|
|[[Goo, Jahyun|AUTHOR Jahyun Goo]]|
|[[Goodwin, Morten|AUTHOR Morten Goodwin]]|
|[[Gopal, Vishak|AUTHOR Vishak Gopal]]|
|[[Gopalakrishnan, Karthik|AUTHOR Karthik Gopalakrishnan]]|
|[[Gope, Dipanjan|AUTHOR Dipanjan Gope]]|
|[[Gopikishore, Pebbili|AUTHOR Pebbili Gopikishore]]|
|[[Gorin, Arseniy|AUTHOR Arseniy Gorin]]|
|[[Gorinski, Philip John|AUTHOR Philip John Gorinski]]|
|[[Gorlanov, Artem|AUTHOR Artem Gorlanov]]|
|[[Gosztolya, Gábor|AUTHOR Gábor Gosztolya]]|
|[[Goto, Shunsuke|AUTHOR Shunsuke Goto]]|
|[[Gowda, Dhananjaya|AUTHOR Dhananjaya Gowda]]|
|[[Granmo, Ole-Christoffer|AUTHOR Ole-Christoffer Granmo]]|
|[[Granqvist, Filip|AUTHOR Filip Granqvist]]|
|[[Green, Jordan R.|AUTHOR Jordan R. Green]]|
|[[Gresse, Adrien|AUTHOR Adrien Gresse]]|
|[[Gretter, Roberto|AUTHOR Roberto Gretter]]|
|[[Grilo, Margarida|AUTHOR Margarida Grilo]]|
|[[Grima, Steven|AUTHOR Steven Grima]]|
|[[Grimault, Nicolas|AUTHOR Nicolas Grimault]]|
|[[Grondin, François|AUTHOR François Grondin]]|
|[[Grönroos, Stig-Arne|AUTHOR Stig-Arne Grönroos]]|
|[[Grósz, Tamás|AUTHOR Tamás Grósz]]|
|[[Gruenstein, Alexander|AUTHOR Alexander Gruenstein]]|
|[[Gu, Bin|AUTHOR Bin Gu]]|
|[[Gu, Binbin|AUTHOR Binbin Gu]]|
|[[Gu, Jiacheng|AUTHOR Jiacheng Gu]]|
|[[Gu, Jiatao|AUTHOR Jiatao Gu]]|
|[[Gu, Mingxiao|AUTHOR Mingxiao Gu]]|
|[[Gu, Rongzhi|AUTHOR Rongzhi Gu]]|
|[[Gu, Wentao|AUTHOR Wentao Gu]]|
|[[Gu, Yanmei|AUTHOR Yanmei Gu]]|
|[[Gu, Yuling|AUTHOR Yuling Gu]]|
|[[Gudepu, Prithvi R.R.|AUTHOR Prithvi R.R. Gudepu]]|
|[[Guillot, Fabien|AUTHOR Fabien Guillot]]|
|[[Guimarães, Isabel|AUTHOR Isabel Guimarães]]|
|[[Gulati, Anmol|AUTHOR Anmol Gulati]]|
|[[Gump, Michael|AUTHOR Michael Gump]]|
|[[Gundogdu, Batuhan|AUTHOR Batuhan Gundogdu]]|
|[[Guo, Chenkai|AUTHOR Chenkai Guo]]|
|[[Guo, Jiaqi|AUTHOR Jiaqi Guo]]|
|[[Guo, Jinxi|AUTHOR Jinxi Guo]]|
|[[Guo, Kaibin|AUTHOR Kaibin Guo]]|
|[[Guo, Lili|AUTHOR Lili Guo]]|
|[[Guo, Ling|AUTHOR Ling Guo]]|
|[[Guo, Pengcheng|AUTHOR Pengcheng Guo]]|
|[[Guo, Wu|AUTHOR Wu Guo]]|
|[[Guo, Xiawei|AUTHOR Xiawei Guo]]|
|[[Gupta, Anshul|AUTHOR Anshul Gupta]]|
|[[Gupta, Ashutosh|AUTHOR Ashutosh Gupta]]|
|[[Gurugubelli, Krishna|AUTHOR Krishna Gurugubelli]]|
|[[Gusev, Aleksei|AUTHOR Aleksei Gusev]]|
|[[Gussenhoven, Carlos|AUTHOR Carlos Gussenhoven]]|
|[[Gutierrez-Osuna, Ricardo|AUTHOR Ricardo Gutierrez-Osuna]]|
|[[Gutkin, Alexander|AUTHOR Alexander Gutkin]]|
|[[Gutz, Sarah E.|AUTHOR Sarah E. Gutz]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[H., Muralikrishna|AUTHOR Muralikrishna H.]]|
|[[Ha, Jung-Woo|AUTHOR Jung-Woo Ha]]|
|[[Ha, Sungjoo|AUTHOR Sungjoo Ha]]|
|[[Ha, Thanh-Le|AUTHOR Thanh-Le Ha]]|
|[[Habberstad, Doug|AUTHOR Doug Habberstad]]|
|[[Habets, Emanuël A.P.|AUTHOR Emanuël A.P. Habets]]|
|[[Habrych, Marcin|AUTHOR Marcin Habrych]]|
|[[Hadian, Hossein|AUTHOR Hossein Hadian]]|
|[[Haeb-Umbach, Reinhold|AUTHOR Reinhold Haeb-Umbach]]|
|[[Haffari, Gholamreza|AUTHOR Gholamreza Haffari]]|
|[[Haghani, Parisa|AUTHOR Parisa Haghani]]|
|[[Haider, Fasih|AUTHOR Fasih Haider]]|
|[[Hain, Thomas|AUTHOR Thomas Hain]]|
|[[Hakkani-Tür, Dilek|AUTHOR Dilek Hakkani-Tür]]|
|[[Halpern, Bence Mark|AUTHOR Bence Mark Halpern]]|
|[[Ham, Chiheon|AUTHOR Chiheon Ham]]|
|[[Hamada, Kenta|AUTHOR Kenta Hamada]]|
|[[Hamann, Silke|AUTHOR Silke Hamann]]|
|[[Hamill, Christopher|AUTHOR Christopher Hamill]]|
|[[Hamilton, Antonia|AUTHOR Antonia Hamilton]]|
|[[Hamza, Wael|AUTHOR Wael Hamza]]|
|[[Han, David K.|AUTHOR David K. Han]]|
|[[Han, Hojae|AUTHOR Hojae Han]]|
|[[Han, Hyewon|AUTHOR Hyewon Han]]|
|[[Han, Icksang|AUTHOR Icksang Han]]|
|[[Han, Jeong-Im|AUTHOR Jeong-Im Han]]|
|[[Han, Jing|AUTHOR Jing Han]]|
|[[Han, Jinyoung|AUTHOR Jinyoung Han]]|
|[[Han, Jiqing|AUTHOR Jiqing Han]]|
|[[Han, Kyu J.|AUTHOR Kyu J. Han]]|
|[[Han, Min Hyun|AUTHOR Min Hyun Han]]|
|[[Han, Qilong|AUTHOR Qilong Han]]|
|[[Han, Seungju|AUTHOR Seungju Han]]|
|[[Han, Wei|AUTHOR Wei Han]]|
|[[Han, Youngho|AUTHOR Youngho Han]]|
|[[Hanjalic, Alan|AUTHOR Alan Hanjalic]]|
|[[Hannon, Daniel|AUTHOR Daniel Hannon]]|
|[[Hannun, Awni|AUTHOR Awni Hannun]]|
|[[Hansen, John H.L.|AUTHOR John H.L. Hansen]]|
|[[Hanssen, Judith|AUTHOR Judith Hanssen]]|
|[[Hantke, Simone|AUTHOR Simone Hantke]]|
|[[Hao, Xiang|AUTHOR Xiang Hao]]|
|[[Hao, Yunzhe|AUTHOR Yunzhe Hao]]|
|[[Hara, Sunao|AUTHOR Sunao Hara]]|
|[[Harada, Noboru|AUTHOR Noboru Harada]]|
|[[Hard, Andrew|AUTHOR Andrew Hard]]|
|[[Hariri, Salim|AUTHOR Salim Hariri]]|
|[[Härmä, Aki|AUTHOR Aki Härmä]]|
|[[Harmsen, W.|AUTHOR W. Harmsen]]|
|[[Haro, Gloria|AUTHOR Gloria Haro]]|
|[[Harrison, Philip|AUTHOR Philip Harrison]]|
|[[Harte, Naomi|AUTHOR Naomi Harte]]|
|[[Harwath, David|AUTHOR David Harwath]]|
|[[Hasan, Madina|AUTHOR Madina Hasan]]|
|[[Hasegawa-Johnson, Mark|AUTHOR Mark Hasegawa-Johnson]]|
|[[Hashimoto, Ayako|AUTHOR Ayako Hashimoto]]|
|[[Hashimoto, Kei|AUTHOR Kei Hashimoto]]|
|[[Haviv, Yinnon|AUTHOR Yinnon Haviv]]|
|[[Havtorn, Jakob D.|AUTHOR Jakob D. Havtorn]]|
|[[Hayashi, Tomoki|AUTHOR Tomoki Hayashi]]|
|[[He, Bingsheng|AUTHOR Bingsheng He]]|
|[[He, Lei|AUTHOR Lei He]]|
|[[He, Liang|AUTHOR Liang He]]|
|[[He, Qing|AUTHOR Qing He]]|
|[[He, Xiaodong|AUTHOR Xiaodong He]]|
|[[He, Yanzhang|AUTHOR Yanzhang He]]|
|[[He, Zhenhao|AUTHOR Zhenhao He]]|
|[[Hedayatnia, Behnam|AUTHOR Behnam Hedayatnia]]|
|[[Hegde, Rajesh M.|AUTHOR Rajesh M. Hegde]]|
|[[Heitkaemper, Jens|AUTHOR Jens Heitkaemper]]|
|[[Heitzman, Daragh|AUTHOR Daragh Heitzman]]|
|[[Helwani, Karim|AUTHOR Karim Helwani]]|
|[[Hempel, Adrian|AUTHOR Adrian Hempel]]|
|[[Heo, Hee-Soo|AUTHOR Hee-Soo Heo]]|
|[[Herff, Christian|AUTHOR Christian Herff]]|
|[[Hermansky, Hynek|AUTHOR Hynek Hermansky]]|
|[[Hernandez, Abner|AUTHOR Abner Hernandez]]|
|[[Hernandez, Angel|AUTHOR Angel Hernandez]]|
|[[Hernando, Javier|AUTHOR Javier Hernando]]|
|[[Heymann, Jahn|AUTHOR Jahn Heymann]]|
|[[Hidaka, Shunsuke|AUTHOR Shunsuke Hidaka]]|
|[[Higashiyama, Soichi|AUTHOR Soichi Higashiyama]]|
|[[Higuchi, Takuya|AUTHOR Takuya Higuchi]]|
|[[Higuchi, Yosuke|AUTHOR Yosuke Higuchi]]|
|[[Hikosaka, Shu|AUTHOR Shu Hikosaka]]|
|[[Hines, Andrew|AUTHOR Andrew Hines]]|
|[[Hiroe, Atsuo|AUTHOR Atsuo Hiroe]]|
|[[Hirschberg, Julia|AUTHOR Julia Hirschberg]]|
|[[Hirschi, Kevin|AUTHOR Kevin Hirschi]]|
|[[Ho, Minh Tri|AUTHOR Minh Tri Ho]]|
|[[Hoffmann, Ildikó|AUTHOR Ildikó Hoffmann]]|
|[[Hoffmeister, Björn|AUTHOR Björn Hoffmeister]]|
|[[Hojo, Nobukatsu|AUTHOR Nobukatsu Hojo]]|
|[[Højvang, Jesper Lisby|AUTHOR Jesper Lisby Højvang]]|
|[[Homma, Takeshi|AUTHOR Takeshi Homma]]|
|[[Honda, Kiyoshi|AUTHOR Kiyoshi Honda]]|
|[[Hong, Qingyang|AUTHOR Qingyang Hong]]|
|[[Hong, Sixin|AUTHOR Sixin Hong]]|
|[[Hong, Teakgyu|AUTHOR Teakgyu Hong]]|
|[[Hönig, Florian|AUTHOR Florian Hönig]]|
|[[Hono, Yukiya|AUTHOR Yukiya Hono]]|
|[[Hoory, Ron|AUTHOR Ron Hoory]]|
|[[Hope, Maxwell|AUTHOR Maxwell Hope]]|
|[[Horaud, Radu|AUTHOR Radu Horaud]]|
|[[Hori, Chiori|AUTHOR Chiori Hori]]|
|[[Hori, Takaaki|AUTHOR Takaaki Hori]]|
|[[Horiguchi, Shota|AUTHOR Shota Horiguchi]]|
|[[Hosseinkashi, Yasaman|AUTHOR Yasaman Hosseinkashi]]|
|[[Hou, Nana|AUTHOR Nana Hou]]|
|[[Hou, Weijian|AUTHOR Weijian Hou]]|
|[[Hou, Wenxin|AUTHOR Wenxin Hou]]|
|[[Hou, Xiaolei|AUTHOR Xiaolei Hou]]|
|[[Hou, Yu|AUTHOR Yu Hou]]|
|[[Hou, Yuanbo|AUTHOR Yuanbo Hou]]|
|[[Hough, Julian|AUTHOR Julian Hough]]|
|[[Houston, Brady|AUTHOR Brady Houston]]|
|[[Hsieh, Chu-Cheng|AUTHOR Chu-Cheng Hsieh]]|
|[[Hsieh, Feng-fan|AUTHOR Feng-fan Hsieh]]|
|[[Hsieh, Hsi-Wei|AUTHOR Hsi-Wei Hsieh]]|
|[[Hsu, Jui-Yang|AUTHOR Jui-Yang Hsu]]|
|[[Hsu, Po-Chien|AUTHOR Po-Chien Hsu]]|
|[[Hsu, Po-chun|AUTHOR Po-chun Hsu]]|
|[[Hsu, Wei-Ning|AUTHOR Wei-Ning Hsu]]|
|[[Hu, Fang|AUTHOR Fang Hu]]|
|[[Hu, Hu|AUTHOR Hu Hu]]|
|[[Hu, Kun|AUTHOR Kun Hu]]|
|[[Hu, Mathieu|AUTHOR Mathieu Hu]]|
|[[Hu, Na|AUTHOR Na Hu]]|
|[[Hu, Na|AUTHOR Na Hu]]|
|[[Hu, Peng|AUTHOR Peng Hu]]|
|[[Hu, Shengli|AUTHOR Shengli Hu]]|
|[[Hu, Shichao|AUTHOR Shichao Hu]]|
|[[Hu, Shoukang|AUTHOR Shoukang Hu]]|
|[[Hu, Wenchao|AUTHOR Wenchao Hu]]|
|[[Hu, Xinhui|AUTHOR Xinhui Hu]]|
|[[Hu, Yanxin|AUTHOR Yanxin Hu]]|
|[[Hu, Ying|AUTHOR Ying Hu]]|
|[[Hu, Yonggang|AUTHOR Yonggang Hu]]|
|[[Hu, Yushi|AUTHOR Yushi Hu]]|
|[[Huang, Binxuan|AUTHOR Binxuan Huang]]|
|[[Huang, Che-Wei|AUTHOR Che-Wei Huang]]|
|[[Huang, Dongyan|AUTHOR Dongyan Huang]]|
|[[Huang, Hao|AUTHOR Hao Huang]]|
|[[Huang, Jiaji|AUTHOR Jiaji Huang]]|
|[[Huang, Jian|AUTHOR Jian Huang]]|
|[[Huang, Jing|AUTHOR Jing Huang]]|
|[[Huang, Jing|AUTHOR Jing Huang]]|
|[[Huang, Jonathan|AUTHOR Jonathan Huang]]|
|[[Huang, Mingkun|AUTHOR Mingkun Huang]]|
|[[Huang, Qiang|AUTHOR Qiang Huang]]|
|[[Huang, Rongqing|AUTHOR Rongqing Huang]]|
|[[Huang, Ruizhe|AUTHOR Ruizhe Huang]]|
|[[Huang, Shanluo|AUTHOR Shanluo Huang]]|
|[[Huang, Wei|AUTHOR Wei Huang]]|
|[[Huang, Weilong|AUTHOR Weilong Huang]]|
|[[Huang, Wen-Chin|AUTHOR Wen-Chin Huang]]|
|[[Huang, Wenyong|AUTHOR Wenyong Huang]]|
|[[Huang, Yan|AUTHOR Yan Huang]]|
|[[Huang, Yiheng|AUTHOR Yiheng Huang]]|
|[[Huang, Yinghui|AUTHOR Yinghui Huang]]|
|[[Huang, Yu-Min|AUTHOR Yu-Min Huang]]|
|[[Huang, Zhaocheng|AUTHOR Zhaocheng Huang]]|
|[[Hubers, Ferdy|AUTHOR Ferdy Hubers]]|
|[[Hübschen, Tobias|AUTHOR Tobias Hübschen]]|
|[[Huckvale, Mark|AUTHOR Mark Huckvale]]|
|[[Hueber, Thomas|AUTHOR Thomas Hueber]]|
|[[Huerta, Cirenia|AUTHOR Cirenia Huerta]]|
|[[Hughes, Vincent|AUTHOR Vincent Hughes]]|
|[[Huh, Jaesung|AUTHOR Jaesung Huh]]|
|[[Hung, Hsiao-Tsung|AUTHOR Hsiao-Tsung Hung]]|
|[[Hung, Jeih-weih|AUTHOR Jeih-weih Hung]]|
|[[Huo, Jingjing|AUTHOR Jingjing Huo]]|
|[[Huo, Nan|AUTHOR Nan Huo]]|
|[[Hussain, Amir|AUTHOR Amir Hussain]]|
|[[Hutin, Mathilde|AUTHOR Mathilde Hutin]]|
|[[Hwang, Jiwon|AUTHOR Jiwon Hwang]]|
|[[Hwang, Min-Jae|AUTHOR Min-Jae Hwang]]|
|[[Hwang, Seung-won|AUTHOR Seung-won Hwang]]|
|[[Hwang, Sung Ju|AUTHOR Sung Ju Hwang]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Ibrahim, Zina|AUTHOR Zina Ibrahim]]|
|[[Igel, Christian|AUTHOR Christian Igel]]|
|[[Ihm, Hyeong Rae|AUTHOR Hyeong Rae Ihm]]|
|[[Ihori, Mana|AUTHOR Mana Ihori]]|
|[[Ijima, Yusuke|AUTHOR Yusuke Ijima]]|
|[[Ikeshita, Rintaro|AUTHOR Rintaro Ikeshita]]|
|[[Ikushima, Mirei|AUTHOR Mirei Ikushima]]|
|[[Ilin, Alexander|AUTHOR Alexander Ilin]]|
|[[Illa, Aravind|AUTHOR Aravind Illa]]|
|[[Illina, Irina|AUTHOR Irina Illina]]|
|[[Illium, Steffen|AUTHOR Steffen Illium]]|
|[[Imatomi, Setsuko|AUTHOR Setsuko Imatomi]]|
|[[Inaguma, Hirofumi|AUTHOR Hirofumi Inaguma]]|
|[[India, Miquel|AUTHOR Miquel India]]|
|[[Inoue, Koji|AUTHOR Koji Inoue]]|
|[[Ionescu, Radu Tudor|AUTHOR Radu Tudor Ionescu]]|
|[[Iranzo-Sánchez, Javier|AUTHOR Javier Iranzo-Sánchez]]|
|[[Irino, Toshio|AUTHOR Toshio Irino]]|
|[[Isaieva, Karyna|AUTHOR Karyna Isaieva]]|
|[[Ishihara, Tatsuma|AUTHOR Tatsuma Ishihara]]|
|[[Ishikawa, Keisuke|AUTHOR Keisuke Ishikawa]]|
|[[Ishtiaq, Samin|AUTHOR Samin Ishtiaq]]|
|[[Isik, Umut|AUTHOR Umut Isik]]|
|[[Issa, Elsayed Sabry Abdelaal|AUTHOR Elsayed Sabry Abdelaal Issa]]|
|[[Ito, Akinori|AUTHOR Akinori Ito]]|
|[[Ito, Hiroaki|AUTHOR Hiroaki Ito]]|
|[[Ivanov, Artem|AUTHOR Artem Ivanov]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Jadoul, Yannick|AUTHOR Yannick Jadoul]]|
|[[Jain, Abhilash|AUTHOR Abhilash Jain]]|
|[[Jain, Mahaveer|AUTHOR Mahaveer Jain]]|
|[[Jain, Taru|AUTHOR Taru Jain]]|
|[[Jalal, Md. Asif|AUTHOR Md. Asif Jalal]]|
|[[Jang, Won|AUTHOR Won Jang]]|
|[[Jansen, Aren|AUTHOR Aren Jansen]]|
|[[Janssen, Berit|AUTHOR Berit Janssen]]|
|[[Jatteau, Adèle|AUTHOR Adèle Jatteau]]|
|[[Jauhiainen, Tommi|AUTHOR Tommi Jauhiainen]]|
|[[Jayashankar, Tejas|AUTHOR Tejas Jayashankar]]|
|[[Jayasimha, Aditya|AUTHOR Aditya Jayasimha]]|
|[[Jayawardena, Sadari|AUTHOR Sadari Jayawardena]]|
|[[Jedlikowski, Przemyslaw|AUTHOR Przemyslaw Jedlikowski]]|
|[[Jensen, Carl|AUTHOR Carl Jensen]]|
|[[Jensen, Jesper|AUTHOR Jesper Jensen]]|
|[[Jensen, Søren H.|AUTHOR Søren H. Jensen]]|
|[[Jeong, Myeongho|AUTHOR Myeongho Jeong]]|
|[[Jeong, Seunghoon|AUTHOR Seunghoon Jeong]]|
|[[Ji, Wei|AUTHOR Wei Ji]]|
|[[Ji, Xuan|AUTHOR Xuan Ji]]|
|[[Ji, Yunyun|AUTHOR Yunyun Ji]]|
|[[Jia, Jia|AUTHOR Jia Jia]]|
|[[Jia, Xueli|AUTHOR Xueli Jia]]|
|[[Jia, Yan|AUTHOR Yan Jia]]|
|[[Jia, Ye|AUTHOR Ye Jia]]|
|[[Jiang, Dongwei|AUTHOR Dongwei Jiang]]|
|[[Jiang, Jyun-Yu|AUTHOR Jyun-Yu Jiang]]|
|[[Jiang, Tao|AUTHOR Tao Jiang]]|
|[[Jiang, Tao|AUTHOR Tao Jiang]]|
|[[Jiang, Xin|AUTHOR Xin Jiang]]|
|[[Jiang, Yiheng|AUTHOR Yiheng Jiang]]|
|[[Jiang, Ziyan|AUTHOR Ziyan Jiang]]|
|[[Jiang, Ziyue|AUTHOR Ziyue Jiang]]|
|[[Jin, Di|AUTHOR Di Jin]]|
|[[Jin, Hongxia|AUTHOR Hongxia Jin]]|
|[[Jin, Jiayu|AUTHOR Jiayu Jin]]|
|[[Jin, Qin|AUTHOR Qin Jin]]|
|[[Jin, Sichen|AUTHOR Sichen Jin]]|
|[[Jin, Yujia|AUTHOR Yujia Jin]]|
|[[Jin, Zeyu|AUTHOR Zeyu Jin]]|
|[[Jindal, Amit|AUTHOR Amit Jindal]]|
|[[Jing, Chengye|AUTHOR Chengye Jing]]|
|[[Jo, Yujin|AUTHOR Yujin Jo]]|
|[[Joachim, Dale|AUTHOR Dale Joachim]]|
|[[Joe, Myun-chul|AUTHOR Myun-chul Joe]]|
|[[Joglekar, Aditya|AUTHOR Aditya Joglekar]]|
|[[Johnson, Garett|AUTHOR Garett Johnson]]|
|[[Johnson, Khia A.|AUTHOR Khia A. Johnson]]|
|[[Johnson, Michael T.|AUTHOR Michael T. Johnson]]|
|[[Joly, Arnaud|AUTHOR Arnaud Joly]]|
|[[Joo, Young-Sun|AUTHOR Young-Sun Joo]]|
|[[Jorge, Javier|AUTHOR Javier Jorge]]|
|[[Jose, Christin|AUTHOR Christin Jose]]|
|[[Joseph, Anand|AUTHOR Anand Joseph]]|
|[[Joshi, Vikas|AUTHOR Vikas Joshi]]|
|[[Joty, Shafiq|AUTHOR Shafiq Joty]]|
|[[Jouvet, Denis|AUTHOR Denis Jouvet]]|
|[[Joy, Neethu M.|AUTHOR Neethu M. Joy]]|
|[[Juan, Alfons|AUTHOR Alfons Juan]]|
|[[Jui, Shangling|AUTHOR Shangling Jui]]|
|[[Julião, Mariana|AUTHOR Mariana Julião]]|
|[[Jung, Hyunhoon|AUTHOR Hyunhoon Jung]]|
|[[Jung, Jee-weon|AUTHOR Jee-weon Jung]]|
|[[Jung, Kyomin|AUTHOR Kyomin Jung]]|
|[[Jung, Myunghun|AUTHOR Myunghun Jung]]|
|[[Jung, Sunghwan|AUTHOR Sunghwan Jung]]|
|[[Jung, Youngmoon|AUTHOR Youngmoon Jung]]|
|[[Jurdak, Raja|AUTHOR Raja Jurdak]]|
|[[Juvela, Lauri|AUTHOR Lauri Juvela]]|
|[[Jyothi, Preethi|AUTHOR Preethi Jyothi]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Kaburagi, Tokihiko|AUTHOR Tokihiko Kaburagi]]|
|[[Kachkovskaia, Tatiana|AUTHOR Tatiana Kachkovskaia]]|
|[[Kadetotad, Deepak|AUTHOR Deepak Kadetotad]]|
|[[Kadiri, Sudarsana Reddy|AUTHOR Sudarsana Reddy Kadiri]]|
|[[Kahn, Jacob|AUTHOR Jacob Kahn]]|
|[[Kain, Alexander|AUTHOR Alexander Kain]]|
|[[Kakoulidis, Panos|AUTHOR Panos Kakoulidis]]|
|[[Kalinowski, Beniamin|AUTHOR Beniamin Kalinowski]]|
|[[Kamenev, Stanislav|AUTHOR Stanislav Kamenev]]|
|[[Kameoka, Hirokazu|AUTHOR Hirokazu Kameoka]]|
|[[Kaminskaïa, Svetlana|AUTHOR Svetlana Kaminskaïa]]|
|[[Kamper, Herman|AUTHOR Herman Kamper]]|
|[[Kanagawa, Hiroki|AUTHOR Hiroki Kanagawa]]|
|[[Kanda, Naoyuki|AUTHOR Naoyuki Kanda]]|
|[[Kaneko, Takuhiro|AUTHOR Takuhiro Kaneko]]|
|[[Kang, Hong-Goo|AUTHOR Hong-Goo Kang]]|
|[[Kang, Jiawen|AUTHOR Jiawen Kang]]|
|[[Kang, Jingu|AUTHOR Jingu Kang]]|
|[[Kang, Okim|AUTHOR Okim Kang]]|
|[[Kang, Shiyin|AUTHOR Shiyin Kang]]|
|[[Kang, Woo Hyun|AUTHOR Woo Hyun Kang]]|
|[[Kao, Chieh-Chi|AUTHOR Chieh-Chi Kao]]|
|[[Karadayi, Julien|AUTHOR Julien Karadayi]]|
|[[Karamichali, Eleni|AUTHOR Eleni Karamichali]]|
|[[Karita, Shigeki|AUTHOR Shigeki Karita]]|
|[[Karlapati, Sri|AUTHOR Sri Karlapati]]|
|[[Karnjana, Jessada|AUTHOR Jessada Karnjana]]|
|[[Karpov, Alexey|AUTHOR Alexey Karpov]]|
|[[Kashino, Kunio|AUTHOR Kunio Kashino]]|
|[[Kathania, Hemant|AUTHOR Hemant Kathania]]|
|[[Katsurada, Kouichi|AUTHOR Kouichi Katsurada]]|
|[[Kawahara, Tatsuya|AUTHOR Tatsuya Kawahara]]|
|[[Kawai, Hisashi|AUTHOR Hisashi Kawai]]|
|[[Kawakami, Kazuya|AUTHOR Kazuya Kawakami]]|
|[[Kawamura, Naoko|AUTHOR Naoko Kawamura]]|
|[[Kawanishi, Takahito|AUTHOR Takahito Kawanishi]]|
|[[Kaya, Heysem|AUTHOR Heysem Kaya]]|
|[[Ke, Dengfeng|AUTHOR Dengfeng Ke]]|
|[[Kegler, Mikolaj|AUTHOR Mikolaj Kegler]]|
|[[Kelly, Amelia C.|AUTHOR Amelia C. Kelly]]|
|[[Kelterer, Anneliese|AUTHOR Anneliese Kelterer]]|
|[[Kemp, Renee|AUTHOR Renee Kemp]]|
|[[Kenter, Tom|AUTHOR Tom Kenter]]|
|[[Keren, Gil|AUTHOR Gil Keren]]|
|[[Keromnes, Yvon|AUTHOR Yvon Keromnes]]|
|[[Keshet, Joseph|AUTHOR Joseph Keshet]]|
|[[Kethireddy, Rashmi|AUTHOR Rashmi Kethireddy]]|
|[[Khalifa, Sara|AUTHOR Sara Khalifa]]|
|[[Khalil, Hosam|AUTHOR Hosam Khalil]]|
|[[Khan, Umair|AUTHOR Umair Khan]]|
|[[Khandelwal, Kartik|AUTHOR Kartik Khandelwal]]|
|[[Khanna, Piyush|AUTHOR Piyush Khanna]]|
|[[Khare, Aparna|AUTHOR Aparna Khare]]|
|[[Khare, Shreya|AUTHOR Shreya Khare]]|
|[[Khokhlov, Yuri|AUTHOR Yuri Khokhlov]]|
|[[Khonglah, Banriskhem K.|AUTHOR Banriskhem K. Khonglah]]|
|[[Khudanpur, Sanjeev|AUTHOR Sanjeev Khudanpur]]|
|[[Khurana, Sameer|AUTHOR Sameer Khurana]]|
|[[Kikuchi, Yuki|AUTHOR Yuki Kikuchi]]|
|[[Kim, Bongwan|AUTHOR Bongwan Kim]]|
|[[Kim, Chanwoo|AUTHOR Chanwoo Kim]]|
|[[Kim, Daehyun|AUTHOR Daehyun Kim]]|
|[[Kim, Donghyeon|AUTHOR Donghyeon Kim]]|
|[[Kim, Dongyoung|AUTHOR Dongyoung Kim]]|
|[[Kim, Doo-young|AUTHOR Doo-young Kim]]|
|[[Kim, Eunmi|AUTHOR Eunmi Kim]]|
|[[Kim, Hoirin|AUTHOR Hoirin Kim]]|
|[[Kim, Hong Kook|AUTHOR Hong Kook Kim]]|
|[[Kim, Hyeji|AUTHOR Hyeji Kim]]|
|[[Kim, Hyun Ah|AUTHOR Hyun Ah Kim]]|
|[[Kim, Injung|AUTHOR Injung Kim]]|
|[[Kim, Jaebok|AUTHOR Jaebok Kim]]|
|[[Kim, Jae-Min|AUTHOR Jae-Min Kim]]|
|[[Kim, Jihwan|AUTHOR Jihwan Kim]]|
|[[Kim, Jin-Seob|AUTHOR Jin-Seob Kim]]|
|[[Kim, Jiyeon|AUTHOR Jiyeon Kim]]|
|[[Kim, Joo-Yeon|AUTHOR Joo-Yeon Kim]]|
|[[Kim, Ju-ho|AUTHOR Ju-ho Kim]]|
|[[Kim, Jung-Hee|AUTHOR Jung-Hee Kim]]|
|[[Kim, Kwangyoun|AUTHOR Kwangyoun Kim]]|
|[[Kim, Kyungho|AUTHOR Kyungho Kim]]|
|[[Kim, Minje|AUTHOR Minje Kim]]|
|[[Kim, Nam Soo|AUTHOR Nam Soo Kim]]|
|[[Kim, Sangki|AUTHOR Sangki Kim]]|
|[[Kim, Seung-bin|AUTHOR Seung-bin Kim]]|
|[[Kim, Soojin|AUTHOR Soojin Kim]]|
|[[Kim, Sooyeon|AUTHOR Sooyeon Kim]]|
|[[Kim, Sunghun|AUTHOR Sunghun Kim]]|
|[[Kim, Sunhee|AUTHOR Sunhee Kim]]|
|[[Kim, Yelin|AUTHOR Yelin Kim]]|
|[[Kim, Youngik|AUTHOR Youngik Kim]]|
|[[Kim, Young-Kil|AUTHOR Young-Kil Kim]]|
|[[Kimura, Akisato|AUTHOR Akisato Kimura]]|
|[[Kimura, Naoki|AUTHOR Naoki Kimura]]|
|[[King, Simon|AUTHOR Simon King]]|
|[[Kingsbury, Brian|AUTHOR Brian Kingsbury]]|
|[[Kinnunen, Tomi|AUTHOR Tomi Kinnunen]]|
|[[Kinoshita, Keisuke|AUTHOR Keisuke Kinoshita]]|
|[[Kirchhoff, Katrin|AUTHOR Katrin Kirchhoff]]|
|[[Kirkedal, Andreas|AUTHOR Andreas Kirkedal]]|
|[[Kishida, Takuya|AUTHOR Takuya Kishida]]|
|[[Kishore, Vinith|AUTHOR Vinith Kishore]]|
|[[Kiss, Imre|AUTHOR Imre Kiss]]|
|[[Kitahara, Mafuyu|AUTHOR Mafuyu Kitahara]]|
|[[Kitamura, Tatsuya|AUTHOR Tatsuya Kitamura]]|
|[[Kjartansson, Oddur|AUTHOR Oddur Kjartansson]]|
|[[Klabbers, Esther|AUTHOR Esther Klabbers]]|
|[[Klakow, Dietrich|AUTHOR Dietrich Klakow]]|
|[[Kleijn, W. Bastiaan|AUTHOR W. Bastiaan Kleijn]]|
|[[Kleinbauer, Thomas|AUTHOR Thomas Kleinbauer]]|
|[[Klewitz, Palle|AUTHOR Palle Klewitz]]|
|[[Klimkov, Viacheslav|AUTHOR Viacheslav Klimkov]]|
|[[Klumpp, Philipp|AUTHOR Philipp Klumpp]]|
|[[Knill, Kate M.|AUTHOR Kate M. Knill]]|
|[[Knister, Kate|AUTHOR Kate Knister]]|
|[[Ko, Hanseok|AUTHOR Hanseok Ko]]|
|[[Ko, Minsam|AUTHOR Minsam Ko]]|
|[[Ko, Tom|AUTHOR Tom Ko]]|
|[[Kobayashi, Kazuhiro|AUTHOR Kazuhiro Kobayashi]]|
|[[Kobayashi, Ryunosuke|AUTHOR Ryunosuke Kobayashi]]|
|[[Kobayashi, Tetsunori|AUTHOR Tetsunori Kobayashi]]|
|[[Kocharov, Daniil|AUTHOR Daniil Kocharov]]|
|[[Koda, Hiroki|AUTHOR Hiroki Koda]]|
|[[Kodrasi, Ina|AUTHOR Ina Kodrasi]]|
|[[Köhler, Thilo|AUTHOR Thilo Köhler]]|
|[[Koike, Tomoya|AUTHOR Tomoya Koike]]|
|[[Koishida, Kazuhito|AUTHOR Kazuhito Koishida]]|
|[[Koizumi, Yuma|AUTHOR Yuma Koizumi]]|
|[[Kolbæk, Morten|AUTHOR Morten Kolbæk]]|
|[[Kolossa, Dorothea|AUTHOR Dorothea Kolossa]]|
|[[Komatani, Kazunori|AUTHOR Kazunori Komatani]]|
|[[Konda, Vighnesh Reddy|AUTHOR Vighnesh Reddy Konda]]|
|[[Kong, Yehao|AUTHOR Yehao Kong]]|
|[[Kononenko, Natasha|AUTHOR Natasha Kononenko]]|
|[[Kons, Zvi|AUTHOR Zvi Kons]]|
|[[Koo, Junghyun|AUTHOR Junghyun Koo]]|
|[[Kopparapu, Sunil Kumar|AUTHOR Sunil Kumar Kopparapu]]|
|[[Korenevskaya, Mariya|AUTHOR Mariya Korenevskaya]]|
|[[Korenevsky, Maxim|AUTHOR Maxim Korenevsky]]|
|[[Koriyama, Tomoki|AUTHOR Tomoki Koriyama]]|
|[[Koshinaka, Takafumi|AUTHOR Takafumi Koshinaka]]|
|[[Kośmider, Michał|AUTHOR Michał Kośmider]]|
|[[Kotani, Gaku|AUTHOR Gaku Kotani]]|
|[[Kothapally, Vinay|AUTHOR Vinay Kothapally]]|
|[[Kothare, Hardik|AUTHOR Hardik Kothare]]|
|[[Koumparoulis, Alexandros|AUTHOR Alexandros Koumparoulis]]|
|[[Koushanfar, Farinaz|AUTHOR Farinaz Koushanfar]]|
|[[Kowalczyk, Konrad|AUTHOR Konrad Kowalczyk]]|
|[[Kozlov, Alexander|AUTHOR Alexander Kozlov]]|
|[[Krajewski, Jarek|AUTHOR Jarek Krajewski]]|
|[[Kreiman, Jody|AUTHOR Jody Kreiman]]|
|[[Kreuk, Felix|AUTHOR Felix Kreuk]]|
|[[Kreyssig, Florian L.|AUTHOR Florian L. Kreyssig]]|
|[[Krishnamohan, Venkat|AUTHOR Venkat Krishnamohan]]|
|[[Krishnamurthy, Rahul|AUTHOR Rahul Krishnamurthy]]|
|[[Krishnan, Prashant|AUTHOR Prashant Krishnan]]|
|[[Krishnaswamy, Arvindh|AUTHOR Arvindh Krishnaswamy]]|
|[[Krug, Paul K.|AUTHOR Paul K. Krug]]|
|[[Krusienski, Dean|AUTHOR Dean Krusienski]]|
|[[Kryzhanovskiy, Vladimir|AUTHOR Vladimir Kryzhanovskiy]]|
|[[Kuang, Jianjing|AUTHOR Jianjing Kuang]]|
|[[Kudinov, Mikhail|AUTHOR Mikhail Kudinov]]|
|[[Külebi, Baybars|AUTHOR Baybars Külebi]]|
|[[Kulis, Brian|AUTHOR Brian Kulis]]|
|[[Kulkarni, Ajinkya|AUTHOR Ajinkya Kulkarni]]|
|[[Kulko, Daniil|AUTHOR Daniil Kulko]]|
|[[Kumanati, Kenichi|AUTHOR Kenichi Kumanati]]|
|[[Kumar, Aiswarya Vinod|AUTHOR Aiswarya Vinod Kumar]]|
|[[Kumar, Ankur|AUTHOR Ankur Kumar]]|
|[[Kumar, Avinash|AUTHOR Avinash Kumar]]|
|[[Kumar, Kshitiz|AUTHOR Kshitiz Kumar]]|
|[[Kumar, Kunal|AUTHOR Kunal Kumar]]|
|[[Kumar, Mehul|AUTHOR Mehul Kumar]]|
|[[Kumar, Rajath|AUTHOR Rajath Kumar]]|
|[[Kumar, Rohit|AUTHOR Rohit Kumar]]|
|[[Kumar, Shankar|AUTHOR Shankar Kumar]]|
|[[Kumar M., Mano Ranjith|AUTHOR Mano Ranjith Kumar M.]]|
|[[Kumatani, Kenichi|AUTHOR Kenichi Kumatani]]|
|[[Kunzmann, Siegfried|AUTHOR Siegfried Kunzmann]]|
|[[Kuo, C.-C. Jay|AUTHOR C.-C. Jay Kuo]]|
|[[Kuo, Chia-Chih|AUTHOR Chia-Chih Kuo]]|
|[[Kuo, Hong-Kwang J.|AUTHOR Hong-Kwang J. Kuo]]|
|[[Kurata, Gakuto|AUTHOR Gakuto Kurata]]|
|[[Kurimo, Mikko|AUTHOR Mikko Kurimo]]|
|[[Kürzinger, Ludwig|AUTHOR Ludwig Kürzinger]]|
|[[Küster, Dennis|AUTHOR Dennis Küster]]|
|[[Kutsenko, Ksenia|AUTHOR Ksenia Kutsenko]]|
|[[Kwak, Donghyun|AUTHOR Donghyun Kwak]]|
|[[Kwiatkowska, Zuzanna|AUTHOR Zuzanna Kwiatkowska]]|
|[[Kwon, Ohsung|AUTHOR Ohsung Kwon]]|
|[[Kwon, Oh-Woog|AUTHOR Oh-Woog Kwon]]|
|[[Kwon, Yoohwan|AUTHOR Yoohwan Kwon]]|
|[[Kye, Seong Min|AUTHOR Seong Min Kye]]|
|[[Kyriakopoulos, Konstantinos|AUTHOR Konstantinos Kyriakopoulos]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[L., Akshay P.|AUTHOR Akshay P. L.]]|
|[[Laganaro, Marina|AUTHOR Marina Laganaro]]|
|[[Lai, Cheng-I|AUTHOR Cheng-I Lai]]|
|[[Lai, Riwei|AUTHOR Riwei Lai]]|
|[[Lai, Wei|AUTHOR Wei Lai]]|
|[[Lai, Ying-Hui|AUTHOR Ying-Hui Lai]]|
|[[Lakomkin, Egor|AUTHOR Egor Lakomkin]]|
|[[Lalain, Muriel|AUTHOR Muriel Lalain]]|
|[[Lalhminghlui, Wendy|AUTHOR Wendy Lalhminghlui]]|
|[[Lamel, Lori|AUTHOR Lori Lamel]]|
|[[Lammert, Adam C.|AUTHOR Adam C. Lammert]]|
|[[Lan, Yizhou|AUTHOR Yizhou Lan]]|
|[[Lancucki, Adrian|AUTHOR Adrian Lancucki]]|
|[[Lane, Nicholas D.|AUTHOR Nicholas D. Lane]]|
|[[Lang, Oran|AUTHOR Oran Lang]]|
|[[Lange, Patrick|AUTHOR Patrick Lange]]|
|[[Laprie, Yves|AUTHOR Yves Laprie]]|
|[[Laptev, Aleksandr|AUTHOR Aleksandr Laptev]]|
|[[Lastras, Luis|AUTHOR Luis Lastras]]|
|[[Latham, William|AUTHOR William Latham]]|
|[[Latif, Siddique|AUTHOR Siddique Latif]]|
|[[Laurent, Antoine|AUTHOR Antoine Laurent]]|
|[[Laurent, Prévot|AUTHOR Prévot Laurent]]|
|[[Laurenzo, Stella|AUTHOR Stella Laurenzo]]|
|[[Lauzon, Jean-Samuel|AUTHOR Jean-Samuel Lauzon]]|
|[[Lavechin, Marvin|AUTHOR Marvin Lavechin]]|
|[[Lavrentyeva, Galina|AUTHOR Galina Lavrentyeva]]|
|[[Le, Duc|AUTHOR Duc Le]]|
|[[Le, Quoc V.|AUTHOR Quoc V. Le]]|
|[[Leal, Isabel|AUTHOR Isabel Leal]]|
|[[Le Blouch, Olivier|AUTHOR Olivier Le Blouch]]|
|[[Lech, Margaret|AUTHOR Margaret Lech]]|
|[[Lee, Bong-Jin|AUTHOR Bong-Jin Lee]]|
|[[Lee, Bong-Ki|AUTHOR Bong-Ki Lee]]|
|[[Lee, Chanhee|AUTHOR Chanhee Lee]]|
|[[Lee, Chan Kyu|AUTHOR Chan Kyu Lee]]|
|[[Lee, Cheng-Kuang|AUTHOR Cheng-Kuang Lee]]|
|[[Lee, Chi-Chang|AUTHOR Chi-Chang Lee]]|
|[[Lee, Chi-Chun|AUTHOR Chi-Chun Lee]]|
|[[Lee, Chin-Hui|AUTHOR Chin-Hui Lee]]|
|[[Lee, Dongyub|AUTHOR Dongyub Lee]]|
|[[Lee, Grandee|AUTHOR Grandee Lee]]|
|[[Lee, Gyeong-Hoon|AUTHOR Gyeong-Hoon Lee]]|
|[[Lee, Hae Beom|AUTHOR Hae Beom Lee]]|
|[[Lee, Hung-yi|AUTHOR Hung-yi Lee]]|
|[[Lee, Jie Hwan|AUTHOR Jie Hwan Lee]]|
|[[Lee, Jinyoung|AUTHOR Jinyoung Lee]]|
|[[Lee, Joohyung|AUTHOR Joohyung Lee]]|
|[[Lee, Joun Yeop|AUTHOR Joun Yeop Lee]]|
|[[Lee, Junmo|AUTHOR Junmo Lee]]|
|[[Lee, Kathy Yuet-Sheung|AUTHOR Kathy Yuet-Sheung Lee]]|
|[[Lee, Kong Aik|AUTHOR Kong Aik Lee]]|
|[[Lee, Kyogu|AUTHOR Kyogu Lee]]|
|[[Lee, Lin-shan|AUTHOR Lin-shan Lee]]|
|[[Lee, Lou|AUTHOR Lou Lee]]|
|[[Lee, Minjae|AUTHOR Minjae Lee]]|
|[[Lee, Munyoung|AUTHOR Munyoung Lee]]|
|[[Lee, Sang-Hoon|AUTHOR Sang-Hoon Lee]]|
|[[Lee, SangJeong|AUTHOR SangJeong Lee]]|
|[[Lee, Sang-Woo|AUTHOR Sang-Woo Lee]]|
|[[Lee, Seanie|AUTHOR Seanie Lee]]|
|[[Lee, Seonghee|AUTHOR Seonghee Lee]]|
|[[Lee, Seong-Whan|AUTHOR Seong-Whan Lee]]|
|[[Lee, Sungjin|AUTHOR Sungjin Lee]]|
|[[Lee, Sungmin|AUTHOR Sungmin Lee]]|
|[[Lee, Tan|AUTHOR Tan Lee]]|
|[[Lee, Yeha|AUTHOR Yeha Lee]]|
|[[Lee, Yogaku|AUTHOR Yogaku Lee]]|
|[[Lee, Yoonhyung|AUTHOR Yoonhyung Lee]]|
|[[Lee, Young-yoon|AUTHOR Young-yoon Lee]]|
|[[Leff, Alexander P.|AUTHOR Alexander P. Leff]]|
|[[Lei, Guangzhi|AUTHOR Guangzhi Lei]]|
|[[Lei, Ming|AUTHOR Ming Lei]]|
|[[Lei, Tao|AUTHOR Tao Lei]]|
|[[Lei, Yun|AUTHOR Yun Lei]]|
|[[Lei, Zhenchun|AUTHOR Zhenchun Lei]]|
|[[Leino, Katri|AUTHOR Katri Leino]]|
|[[Leinonen, Juho|AUTHOR Juho Leinonen]]|
|[[Le Lan, Gaël|AUTHOR Gaël Le Lan]]|
|[[Le Maguer, Sébastien|AUTHOR Sébastien Le Maguer]]|
|[[Lemoine, Laurie|AUTHOR Laurie Lemoine]]|
|[[Lenain, Raphael|AUTHOR Raphael Lenain]]|
|[[Leong, Chee Wee|AUTHOR Chee Wee Leong]]|
|[[Lepp, Haley|AUTHOR Haley Lepp]]|
|[[Le Roux, Jonathan|AUTHOR Jonathan Le Roux]]|
|[[Leschanowsky, Anna|AUTHOR Anna Leschanowsky]]|
|[[Le The, Quoc-Nam|AUTHOR Quoc-Nam Le The]]|
|[[Letondor, Arnaud|AUTHOR Arnaud Letondor]]|
|[[Leung, Cheung-Chi|AUTHOR Cheung-Chi Leung]]|
|[[Leutnant, Volker|AUTHOR Volker Leutnant]]|
|[[Levis, John|AUTHOR John Levis]]|
|[[Levitan, Sarah Ita|AUTHOR Sarah Ita Levitan]]|
|[[Levow, Gina-Anne|AUTHOR Gina-Anne Levow]]|
|[[Levy, Joshua|AUTHOR Joshua Levy]]|
|[[Li, Aini|AUTHOR Aini Li]]|
|[[Li, Andong|AUTHOR Andong Li]]|
|[[Li, Bin|AUTHOR Bin Li]]|
|[[Li, Bo|AUTHOR Bo Li]]|
|[[Li, Bo|AUTHOR Bo Li]]|
|[[Li, Boxue|AUTHOR Boxue Li]]|
|[[Li, Chenda|AUTHOR Chenda Li]]|
|[[Li, Chia-Yu|AUTHOR Chia-Yu Li]]|
|[[Li, Dan|AUTHOR Dan Li]]|
|[[Li, Guanjun|AUTHOR Guanjun Li]]|
|[[Li, Haizhou|AUTHOR Haizhou Li]]|
|[[Li, Hang|AUTHOR Hang Li]]|
|[[Li, Hao|AUTHOR Hao Li]]|
|[[Li, Hao|AUTHOR Hao Li]]|
|[[Li, Haoyu|AUTHOR Haoyu Li]]|
|[[Li, Hua|AUTHOR Hua Li]]|
|[[Li, Jeng-Lin|AUTHOR Jeng-Lin Li]]|
|[[Li, Jialu|AUTHOR Jialu Li]]|
|[[Li, Jing|AUTHOR Jing Li]]|
|[[Li, Jingyu|AUTHOR Jingyu Li]]|
|[[Li, Jinyu|AUTHOR Jinyu Li]]|
|[[Li, Jixiang|AUTHOR Jixiang Li]]|
|[[Li, Kai|AUTHOR Kai Li]]|
|[[Li, Ke|AUTHOR Ke Li]]|
|[[Li, Kun|AUTHOR Kun Li]]|
|[[Li, Lantian|AUTHOR Lantian Li]]|
|[[Li, Li|AUTHOR Li Li]]|
|[[Li, Lin|AUTHOR Lin Li]]|
|[[Li, Mengrou|AUTHOR Mengrou Li]]|
|[[Li, Ming|AUTHOR Ming Li]]|
|[[Li, Na|AUTHOR Na Li]]|
|[[Li, Naihan|AUTHOR Naihan Li]]|
|[[Li, Nan|AUTHOR Nan Li]]|
|[[Li, Pei-Chun|AUTHOR Pei-Chun Li]]|
|[[Li, Qifei|AUTHOR Qifei Li]]|
|[[Li, Qing|AUTHOR Qing Li]]|
|[[Li, Rongjun|AUTHOR Rongjun Li]]|
|[[Li, Ruirui|AUTHOR Ruirui Li]]|
|[[Li, Runnan|AUTHOR Runnan Li]]|
|[[Li, Ruyun|AUTHOR Ruyun Li]]|
|[[Li, Shang-Wen|AUTHOR Shang-Wen Li]]|
|[[Li, Shanpeng|AUTHOR Shanpeng Li]]|
|[[Li, Sheng|AUTHOR Sheng Li]]|
|[[Li, Shengchen|AUTHOR Shengchen Li]]|
|[[Li, Shuyang|AUTHOR Shuyang Li]]|
|[[Li, Sixia|AUTHOR Sixia Li]]|
|[[Li, Song|AUTHOR Song Li]]|
|[[Li, Teng|AUTHOR Teng Li]]|
|[[Li, Tingle|AUTHOR Tingle Li]]|
|[[Li, Wei|AUTHOR Wei Li]]|
|[[Li, Wenqian|AUTHOR Wenqian Li]]|
|[[Li, Wubo|AUTHOR Wubo Li]]|
|[[Li, Wu-Jun|AUTHOR Wu-Jun Li]]|
|[[Li, Xiangang|AUTHOR Xiangang Li]]|
|[[Li, Xiao|AUTHOR Xiao Li]]|
|[[Li, Xiaodong|AUTHOR Xiaodong Li]]|
|[[Li, Xiaofei|AUTHOR Xiaofei Li]]|
|[[Li, Xiaoqi|AUTHOR Xiaoqi Li]]|
|[[Li, Ximin|AUTHOR Ximin Li]]|
|[[Li, Xinjian|AUTHOR Xinjian Li]]|
|[[Li, Xinwei|AUTHOR Xinwei Li]]|
|[[Li, Xinxing|AUTHOR Xinxing Li]]|
|[[Li, Xiulin|AUTHOR Xiulin Li]]|
|[[Li, Xu|AUTHOR Xu Li]]|
|[[Li, Yan|AUTHOR Yan Li]]|
|[[Li, Yanping|AUTHOR Yanping Li]]|
|[[Li, Yanping|AUTHOR Yanping Li]]|
|[[Li, Yaxing|AUTHOR Yaxing Li]]|
|[[Li, Yijie|AUTHOR Yijie Li]]|
|[[Li, Yinghao|AUTHOR Yinghao Li]]|
|[[Li, Yiyuan|AUTHOR Yiyuan Li]]|
|[[Li, Yongfu|AUTHOR Yongfu Li]]|
|[[Li, Yongwei|AUTHOR Yongwei Li]]|
|[[Li, Yun|AUTHOR Yun Li]]|
|[[Li, Yunpeng|AUTHOR Yunpeng Li]]|
|[[Li, Zeqian|AUTHOR Zeqian Li]]|
|[[Li, Zheng|AUTHOR Zheng Li]]|
|[[Li, Zhu|AUTHOR Zhu Li]]|
|[[Li, Zijin|AUTHOR Zijin Li]]|
|[[Liakata, Maria|AUTHOR Maria Liakata]]|
|[[Lian, Chongyuan|AUTHOR Chongyuan Lian]]|
|[[Lian, Zheng|AUTHOR Zheng Lian]]|
|[[Liang, Beici|AUTHOR Beici Liang]]|
|[[Liang, Chuming|AUTHOR Chuming Liang]]|
|[[Liang, Jiaen|AUTHOR Jiaen Liang]]|
|[[Liang, Shan|AUTHOR Shan Liang]]|
|[[Liang, Tianyu|AUTHOR Tianyu Liang]]|
|[[Liang, Wendy|AUTHOR Wendy Liang]]|
|[[Liang, Xiangyu|AUTHOR Xiangyu Liang]]|
|[[Liao, Chien-Feng|AUTHOR Chien-Feng Liao]]|
|[[Liden, Lars|AUTHOR Lars Liden]]|
|[[Liew, Seng Pei|AUTHOR Seng Pei Liew]]|
|[[Likhomanenko, Tatiana|AUTHOR Tatiana Likhomanenko]]|
|[[Lilley, Jason|AUTHOR Jason Lilley]]|
|[[Lim, Dan|AUTHOR Dan Lim]]|
|[[Lim, Heuiseok|AUTHOR Heuiseok Lim]]|
|[[Limonard, S.|AUTHOR S. Limonard]]|
|[[Lin, Baihan|AUTHOR Baihan Lin]]|
|[[Lin, Binghuai|AUTHOR Binghuai Lin]]|
|[[Lin, Chun|AUTHOR Chun Lin]]|
|[[Lin, Edward|AUTHOR Edward Lin]]|
|[[Lin, Hsuan-Tien|AUTHOR Hsuan-Tien Lin]]|
|[[Lin, Hui|AUTHOR Hui Lin]]|
|[[Lin, Ju|AUTHOR Ju Lin]]|
|[[Lin, Qingjian|AUTHOR Qingjian Lin]]|
|[[Lin, Shoufeng|AUTHOR Shoufeng Lin]]|
|[[Lin, Wei-Cheng|AUTHOR Wei-Cheng Lin]]|
|[[Lin, Weiwei|AUTHOR Weiwei Lin]]|
|[[Lin, Yi|AUTHOR Yi Lin]]|
|[[Lin, Yi|AUTHOR Yi Lin]]|
|[[Lin, Yu-Chen|AUTHOR Yu-Chen Lin]]|
|[[Lin, Yue|AUTHOR Yue Lin]]|
|[[Lin, Yun-Shao|AUTHOR Yun-Shao Lin]]|
|[[Lin, Yuqin|AUTHOR Yuqin Lin]]|
|[[Lin, Zhaojiang|AUTHOR Zhaojiang Lin]]|
|[[Lin, Zhenchao|AUTHOR Zhenchao Lin]]|
|[[Lindae, Nicolas|AUTHOR Nicolas Lindae]]|
|[[Lindgren, Matias|AUTHOR Matias Lindgren]]|
|[[Ling, Zhen-Hua|AUTHOR Zhen-Hua Ling]]|
|[[Linnhoff-Popien, Claudia|AUTHOR Claudia Linnhoff-Popien]]|
|[[Liptchinsky, Vitaliy|AUTHOR Vitaliy Liptchinsky]]|
|[[Liscombe, Jackson|AUTHOR Jackson Liscombe]]|
|[[Liu, Alexander H.|AUTHOR Alexander H. Liu]]|
|[[Liu, Andy T.|AUTHOR Andy T. Liu]]|
|[[Liu, Bin|AUTHOR Bin Liu]]|
|[[Liu, Changhong|AUTHOR Changhong Liu]]|
|[[Liu, Changliang|AUTHOR Changliang Liu]]|
|[[Liu, Chaojun|AUTHOR Chaojun Liu]]|
|[[Liu, Chen|AUTHOR Chen Liu]]|
|[[Liu, Chi-Liang|AUTHOR Chi-Liang Liu]]|
|[[Liu, Chunxi|AUTHOR Chunxi Liu]]|
|[[Liu, Danni|AUTHOR Danni Liu]]|
|[[Liu, Da-Rong|AUTHOR Da-Rong Liu]]|
|[[Liu, Dong|AUTHOR Dong Liu]]|
|[[Liu, Haohe|AUTHOR Haohe Liu]]|
|[[Liu, Hong|AUTHOR Hong Liu]]|
|[[Liu, Hongyi|AUTHOR Hongyi Liu]]|
|[[Liu, Hui|AUTHOR Hui Liu]]|
|[[Liu, Ian|AUTHOR Ian Liu]]|
|[[Liu, Jen-Yu|AUTHOR Jen-Yu Liu]]|
|[[Liu, Jianming|AUTHOR Jianming Liu]]|
|[[Liu, Jiaxing|AUTHOR Jiaxing Liu]]|
|[[Liu, Juan|AUTHOR Juan Liu]]|
|[[Liu, Kai|AUTHOR Kai Liu]]|
|[[Liu, Lin|AUTHOR Lin Liu]]|
|[[Liu, Lingling|AUTHOR Lingling Liu]]|
|[[Liu, Liquan|AUTHOR Liquan Liu]]|
|[[Liu, Meng|AUTHOR Meng Liu]]|
|[[Liu, Ming|AUTHOR Ming Liu]]|
|[[Liu, Peng|AUTHOR Peng Liu]]|
|[[Liu, Pengfei|AUTHOR Pengfei Liu]]|
|[[Liu, Qingsong|AUTHOR Qingsong Liu]]|
|[[Liu, Renjie|AUTHOR Renjie Liu]]|
|[[Liu, Ruiqi|AUTHOR Ruiqi Liu]]|
|[[Liu, Rujie|AUTHOR Rujie Liu]]|
|[[Liu, Ruolan|AUTHOR Ruolan Liu]]|
|[[Liu, Shan|AUTHOR Shan Liu]]|
|[[Liu, Shansong|AUTHOR Shansong Liu]]|
|[[Liu, Shujie|AUTHOR Shujie Liu]]|
|[[Liu, Shuo|AUTHOR Shuo Liu]]|
|[[Liu, Songxiang|AUTHOR Songxiang Liu]]|
|[[Liu, Souxiang|AUTHOR Souxiang Liu]]|
|[[Liu, Tianchi|AUTHOR Tianchi Liu]]|
|[[Liu, Wen|AUTHOR Wen Liu]]|
|[[Liu, Wenju|AUTHOR Wenju Liu]]|
|[[Liu, Xuechen|AUTHOR Xuechen Liu]]|
|[[Liu, Xuefei|AUTHOR Xuefei Liu]]|
|[[Liu, Xuejie|AUTHOR Xuejie Liu]]|
|[[Liu, Xunying|AUTHOR Xunying Liu]]|
|[[Liu, Yang|AUTHOR Yang Liu]]|
|[[Liu, Yanqing|AUTHOR Yanqing Liu]]|
|[[Liu, Yi-Ching|AUTHOR Yi-Ching Liu]]|
|[[Liu, Ying|AUTHOR Ying Liu]]|
|[[Liu, Yi|AUTHOR Yi Liu]]|
|[[Liu, Yi|AUTHOR Yi Liu]]|
|[[Liu, Yue|AUTHOR Yue Liu]]|
|[[Liu, Yun|AUTHOR Yun Liu]]|
|[[Liu, Zhang|AUTHOR Zhang Liu]]|
|[[Liu, Zhaoyu|AUTHOR Zhaoyu Liu]]|
|[[Liu, Zhe|AUTHOR Zhe Liu]]|
|[[Liu, Zhengchen|AUTHOR Zhengchen Liu]]|
|[[Liu, Zhijun|AUTHOR Zhijun Liu]]|
|[[Liu, Zhilei|AUTHOR Zhilei Liu]]|
|[[Liu, Zhixin|AUTHOR Zhixin Liu]]|
|[[Liu, Zihan|AUTHOR Zihan Liu]]|
|[[Liu, Zirui|AUTHOR Zirui Liu]]|
|[[Liu, Zuozhen|AUTHOR Zuozhen Liu]]|
|[[Livescu, Karen|AUTHOR Karen Livescu]]|
|[[Lleida, Eduardo|AUTHOR Eduardo Lleida]]|
|[[Lo, Chen-Chou|AUTHOR Chen-Chou Lo]]|
|[[Lo, Tien-Hong|AUTHOR Tien-Hong Lo]]|
|[[Logan, James|AUTHOR James Logan]]|
|[[Lohrenz, Timo|AUTHOR Timo Lohrenz]]|
|[[Long, Yanhua|AUTHOR Yanhua Long]]|
|[[Łopatka, Kuba|AUTHOR Kuba Łopatka]]|
|[[López-Espejo, Iván|AUTHOR Iván López-Espejo]]|
|[[Lopez Moreno, Ignacio|AUTHOR Ignacio Lopez Moreno]]|
|[[Lorenzo-Trueba, Jaime|AUTHOR Jaime Lorenzo-Trueba]]|
|[[Lorin, Louis-Marie|AUTHOR Louis-Marie Lorin]]|
|[[Loukina, Anastassia|AUTHOR Anastassia Loukina]]|
|[[Loweimi, Erfan|AUTHOR Erfan Loweimi]]|
|[[Lozano-Diez, Alicia|AUTHOR Alicia Lozano-Diez]]|
|[[Lu, Chunhui|AUTHOR Chunhui Lu]]|
|[[Lu, Han|AUTHOR Han Lu]]|
|[[Lu, Heng|AUTHOR Heng Lu]]|
|[[Lu, Huanhua|AUTHOR Huanhua Lu]]|
|[[Lu, Jing|AUTHOR Jing Lu]]|
|[[Lu, JinHong|AUTHOR JinHong Lu]]|
|[[Lu, Liang|AUTHOR Liang Lu]]|
|[[Lu, Peiling|AUTHOR Peiling Lu]]|
|[[Lu, Renée|AUTHOR Renée Lu]]|
|[[Lu, Weiyi|AUTHOR Weiyi Lu]]|
|[[Lu, Wenhuan|AUTHOR Wenhuan Lu]]|
|[[Lu, Xugang|AUTHOR Xugang Lu]]|
|[[Lu, Yen-Ju|AUTHOR Yen-Ju Lu]]|
|[[Lu, Yiting|AUTHOR Yiting Lu]]|
|[[Lu, Yizhou|AUTHOR Yizhou Lu]]|
|[[Luan, Huan|AUTHOR Huan Luan]]|
|[[Luan, Jian|AUTHOR Jian Luan]]|
|[[Ludusan, Bogdan|AUTHOR Bogdan Ludusan]]|
|[[Lui, Simon|AUTHOR Simon Lui]]|
|[[Lumban Tobing, Patrick|AUTHOR Patrick Lumban Tobing]]|
|[[Luo, Cheng|AUTHOR Cheng Luo]]|
|[[Luo, Haoneng|AUTHOR Haoneng Luo]]|
|[[Luo, Hongyin|AUTHOR Hongyin Luo]]|
|[[Luo, Jian|AUTHOR Jian Luo]]|
|[[Luo, Mingqiong|AUTHOR Mingqiong Luo]]|
|[[Luo, Shang-Bao|AUTHOR Shang-Bao Luo]]|
|[[Luo, Xuewen|AUTHOR Xuewen Luo]]|
|[[Luo, Yi|AUTHOR Yi Luo]]|
|[[Luo, Yi|AUTHOR Yi Luo]]|
|[[Luo, Zhaojie|AUTHOR Zhaojie Luo]]|
|[[Luong, Chi Mai|AUTHOR Chi Mai Luong]]|
|[[Luz, Saturnino|AUTHOR Saturnino Luz]]|
|[[Lv, Hang|AUTHOR Hang Lv]]|
|[[Lv, Shubo|AUTHOR Shubo Lv]]|
|[[Lyons, Terry|AUTHOR Terry Lyons]]|
|[[Lyu, Siwei|AUTHOR Siwei Lyu]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[M., Gurunath Reddy|AUTHOR Gurunath Reddy M.]]|
|[[Ma, Bin|AUTHOR Bin Ma]]|
|[[Ma, Feng|AUTHOR Feng Ma]]|
|[[Ma, Jiteng|AUTHOR Jiteng Ma]]|
|[[Ma, Jun|AUTHOR Jun Ma]]|
|[[Ma, Shiqian|AUTHOR Shiqian Ma]]|
|[[Ma, Shuang|AUTHOR Shuang Ma]]|
|[[Ma, Tao|AUTHOR Tao Ma]]|
|[[Ma, Xutai|AUTHOR Xutai Ma]]|
|[[Ma, Yi|AUTHOR Yi Ma]]|
|[[Maaløe, Lars|AUTHOR Lars Maaløe]]|
|[[Maas, Roland|AUTHOR Roland Maas]]|
|[[MacIntyre, Alexis Deighton|AUTHOR Alexis Deighton MacIntyre]]|
|[[Mack, Wolfgang|AUTHOR Wolfgang Mack]]|
|[[MacWhinney, Brian|AUTHOR Brian MacWhinney]]|
|[[Madhavi, Maulik|AUTHOR Maulik Madhavi]]|
|[[Madikeri, Srikanth|AUTHOR Srikanth Madikeri]]|
|[[Madotto, Andrea|AUTHOR Andrea Madotto]]|
|[[Maes, Pattie|AUTHOR Pattie Maes]]|
|[[Maffei, Marc F.|AUTHOR Marc F. Maffei]]|
|[[Magalhães, João|AUTHOR João Magalhães]]|
|[[Magalie, Ochs|AUTHOR Ochs Magalie]]|
|[[Magimai-Doss, Mathew|AUTHOR Mathew Magimai-Doss]]|
|[[Mahadeokar, Jay|AUTHOR Jay Mahadeokar]]|
|[[Maier, Andreas|AUTHOR Andreas Maier]]|
|[[Majumdar, Somshubra|AUTHOR Somshubra Majumdar]]|
|[[Mak, Brian|AUTHOR Brian Mak]]|
|[[Mak, Man-Wai|AUTHOR Man-Wai Mak]]|
|[[Makino, Shoji|AUTHOR Shoji Makino]]|
|[[Makishima, Naoki|AUTHOR Naoki Makishima]]|
|[[Malinin, Andrey|AUTHOR Andrey Malinin]]|
|[[Mallela, Jhansi|AUTHOR Jhansi Mallela]]|
|[[Mallol-Ragolta, Adria|AUTHOR Adria Mallol-Ragolta]]|
|[[Mamontov, Danila|AUTHOR Danila Mamontov]]|
|[[Mana, Franco|AUTHOR Franco Mana]]|
|[[Manakul, Potsawee|AUTHOR Potsawee Manakul]]|
|[[Mandel, Michael I.|AUTHOR Michael I. Mandel]]|
|[[Mandell, Ari|AUTHOR Ari Mandell]]|
|[[Manghat, Sreeja|AUTHOR Sreeja Manghat]]|
|[[Manghat, Sreeram|AUTHOR Sreeram Manghat]]|
|[[Maniati, Georgia|AUTHOR Georgia Maniati]]|
|[[Mannem, Renuka|AUTHOR Renuka Mannem]]|
|[[Manocha, Pranay|AUTHOR Pranay Manocha]]|
|[[Manohar, Vimal|AUTHOR Vimal Manohar]]|
|[[Mao, Huanru Henry|AUTHOR Huanru Henry Mao]]|
|[[Mao, Qirong|AUTHOR Qirong Mao]]|
|[[Mao, Shuiyang|AUTHOR Shuiyang Mao]]|
|[[Maor, Ronnie|AUTHOR Ronnie Maor]]|
|[[Maouche, Mohamed|AUTHOR Mohamed Maouche]]|
|[[Marcel, Sébastien|AUTHOR Sébastien Marcel]]|
|[[Markitantov, Maxim|AUTHOR Maxim Markitantov]]|
|[[Markó, Alexandra|AUTHOR Alexandra Markó]]|
|[[Markopoulos, Konstantinos|AUTHOR Konstantinos Markopoulos]]|
|[[Marques, Nuno|AUTHOR Nuno Marques]]|
|[[Martin, Joshua L.|AUTHOR Joshua L. Martin]]|
|[[Martinc, Matej|AUTHOR Matej Martinc]]|
|[[Martín-Doñas, Juan M.|AUTHOR Juan M. Martín-Doñas]]|
|[[Martinez-Lucas, Luz|AUTHOR Luz Martinez-Lucas]]|
|[[Martins, Isabel P.|AUTHOR Isabel P. Martins]]|
|[[Marxer, Ricard|AUTHOR Ricard Marxer]]|
|[[Maselli, Lorenzo|AUTHOR Lorenzo Maselli]]|
|[[Masuda, Issey|AUTHOR Issey Masuda]]|
|[[Masumura, Ryo|AUTHOR Ryo Masumura]]|
|[[Masztalski, Piotr|AUTHOR Piotr Masztalski]]|
|[[Matassoni, Marco|AUTHOR Marco Matassoni]]|
|[[Matějka, Pavel|AUTHOR Pavel Matějka]]|
|[[Mathews, Rajiv|AUTHOR Rajiv Mathews]]|
|[[Mathur, Akhil|AUTHOR Akhil Mathur]]|
|[[Mathur, Puneet|AUTHOR Puneet Mathur]]|
|[[Matrouf, Driss|AUTHOR Driss Matrouf]]|
|[[Matsui, Sanae|AUTHOR Sanae Matsui]]|
|[[Matsumoto, Kento|AUTHOR Kento Matsumoto]]|
|[[Matsunaga, Noriyuki|AUTHOR Noriyuki Matsunaga]]|
|[[Matsuura, Kohei|AUTHOR Kohei Matsuura]]|
|[[Mattina, Matthew|AUTHOR Matthew Mattina]]|
|[[Mattock, Karen|AUTHOR Karen Mattock]]|
|[[Matusevych, Sergiy|AUTHOR Sergiy Matusevych]]|
|[[Matuszewski, Mateusz|AUTHOR Mateusz Matuszewski]]|
|[[Mau, Ted|AUTHOR Ted Mau]]|
|[[Mauclair, Julie|AUTHOR Julie Mauclair]]|
|[[Mavandadi, Sepand|AUTHOR Sepand Mavandadi]]|
|[[Mawalim, Candy Olivia|AUTHOR Candy Olivia Mawalim]]|
|[[Maybery, Murray|AUTHOR Murray Maybery]]|
|[[Mazalov, Vadim|AUTHOR Vadim Mazalov]]|
|[[McAuley, Julian|AUTHOR Julian McAuley]]|
|[[McClendon, Jerome L.|AUTHOR Jerome L. McClendon]]|
|[[McCree, Alan|AUTHOR Alan McCree]]|
|[[McDuff, Daniel|AUTHOR Daniel McDuff]]|
|[[McGraw, Ian|AUTHOR Ian McGraw]]|
|[[McGuire, Grant L.|AUTHOR Grant L. McGuire]]|
|[[McKenna, Joseph P.|AUTHOR Joseph P. McKenna]]|
|[[McLoughlin, Ian|AUTHOR Ian McLoughlin]]|
|[[Md. Salleh, Siti Umairah|AUTHOR Siti Umairah Md. Salleh]]|
|[[Medennikov, Ivan|AUTHOR Ivan Medennikov]]|
|[[Mehrotra, Abhinav|AUTHOR Abhinav Mehrotra]]|
|[[Mehta, Rupeshkumar|AUTHOR Rupeshkumar Mehta]]|
|[[Mehta, Rupesh R.|AUTHOR Rupesh R. Mehta]]|
|[[Memon, Shahan Ali|AUTHOR Shahan Ali Memon]]|
|[[Mendonça, John|AUTHOR John Mendonça]]|
|[[Mendoza Ramos, V.|AUTHOR V. Mendoza Ramos]]|
|[[Meng, Helen|AUTHOR Helen Meng]]|
|[[Meng, Jian|AUTHOR Jian Meng]]|
|[[Meng, Qinglin|AUTHOR Qinglin Meng]]|
|[[Meng, Xuanbo|AUTHOR Xuanbo Meng]]|
|[[Meng, Zhong|AUTHOR Zhong Meng]]|
|[[Menshikova, Alla|AUTHOR Alla Menshikova]]|
|[[Merboldt, André|AUTHOR André Merboldt]]|
|[[Mesgarani, Nima|AUTHOR Nima Mesgarani]]|
|[[Messner, Eva-Maria|AUTHOR Eva-Maria Messner]]|
|[[Metze, Florian|AUTHOR Florian Metze]]|
|[[Meunier, Christine|AUTHOR Christine Meunier]]|
|[[Meunier, Fanny|AUTHOR Fanny Meunier]]|
|[[Meyer, Bernd T.|AUTHOR Bernd T. Meyer]]|
|[[Meyer, Julien|AUTHOR Julien Meyer]]|
|[[Mhiri, Mohamed|AUTHOR Mohamed Mhiri]]|
|[[Michael, Thilo|AUTHOR Thilo Michael]]|
|[[Michaud, François|AUTHOR François Michaud]]|
|[[Michel, Wilfried|AUTHOR Wilfried Michel]]|
|[[Michelas, Amandine|AUTHOR Amandine Michelas]]|
|[[Michelsanti, Daniel|AUTHOR Daniel Michelsanti]]|
|[[Miedzinski, Bogdan|AUTHOR Bogdan Miedzinski]]|
|[[Miguel, Antonio|AUTHOR Antonio Miguel]]|
|[[Milde, Benjamin|AUTHOR Benjamin Milde]]|
|[[Millard, David|AUTHOR David Millard]]|
|[[Millet, Juliette|AUTHOR Juliette Millet]]|
|[[Milner, Rosanna|AUTHOR Rosanna Milner]]|
|[[Mimura, Masato|AUTHOR Masato Mimura]]|
|[[Min, Kyoungbo|AUTHOR Kyoungbo Min]]|
|[[Min, Kyungbo|AUTHOR Kyungbo Min]]|
|[[Minematsu, Nobuaki|AUTHOR Nobuaki Minematsu]]|
|[[Mingote, Victoria|AUTHOR Victoria Mingote]]|
|[[Minker, Wolfgang|AUTHOR Wolfgang Minker]]|
|[[Mirheidari, Bahman|AUTHOR Bahman Mirheidari]]|
|[[Mishchenko, Yuriy|AUTHOR Yuriy Mishchenko]]|
|[[Mishima, Sakiko|AUTHOR Sakiko Mishima]]|
|[[Mishra, Jagabandhu|AUTHOR Jagabandhu Mishra]]|
|[[Misiunas, Karolis|AUTHOR Karolis Misiunas]]|
|[[Misra, Ananya|AUTHOR Ananya Misra]]|
|[[Mitrofanov, Anton|AUTHOR Anton Mitrofanov]]|
|[[Mitsui, Kentaro|AUTHOR Kentaro Mitsui]]|
|[[Mittag, Gabriel|AUTHOR Gabriel Mittag]]|
|[[Mittal, Ashish|AUTHOR Ashish Mittal]]|
|[[Mizgajski, Jan|AUTHOR Jan Mizgajski]]|
|[[Mizoguchi, Ai|AUTHOR Ai Mizoguchi]]|
|[[Mizushima, Ryo|AUTHOR Ryo Mizushima]]|
|[[Mo, Tong|AUTHOR Tong Mo]]|
|[[Möbius, Bernd|AUTHOR Bernd Möbius]]|
|[[Mohamed, Abdelrahman|AUTHOR Abdelrahman Mohamed]]|
|[[Mohan, Devang S. Ram|AUTHOR Devang S. Ram Mohan]]|
|[[Moinet, Alexis|AUTHOR Alexis Moinet]]|
|[[Möller, Sebastian|AUTHOR Sebastian Möller]]|
|[[Moniz, Helena|AUTHOR Helena Moniz]]|
|[[Montacié, Claude|AUTHOR Claude Montacié]]|
|[[Montalvão, Jugurta|AUTHOR Jugurta Montalvão]]|
|[[Montalvo, Ana|AUTHOR Ana Montalvo]]|
|[[Montillot, Justine|AUTHOR Justine Montillot]]|
|[[Moore, Meredith|AUTHOR Meredith Moore]]|
|[[Moore, Roger K.|AUTHOR Roger K. Moore]]|
|[[Morais, Edmilson da Silva|AUTHOR Edmilson da Silva Morais]]|
|[[Morchid, Mohamed|AUTHOR Mohamed Morchid]]|
|[[Moreno, Pedro J.|AUTHOR Pedro J. Moreno]]|
|[[Mori, Hiroki|AUTHOR Hiroki Mori]]|
|[[Mori, Koichiro|AUTHOR Koichiro Mori]]|
|[[Morin, Emmanuel|AUTHOR Emmanuel Morin]]|
|[[Morita, Takashi|AUTHOR Takashi Morita]]|
|[[Moritz, Niko|AUTHOR Niko Moritz]]|
|[[Moriya, Takafumi|AUTHOR Takafumi Moriya]]|
|[[Moro-Velázquez, Laureano|AUTHOR Laureano Moro-Velázquez]]|
|[[Morrison, Max|AUTHOR Max Morrison]]|
|[[Mortazavi, Masood S.|AUTHOR Masood S. Mortazavi]]|
|[[Morzy, Mikołaj|AUTHOR Mikołaj Morzy]]|
|[[Motlicek, Petr|AUTHOR Petr Motlicek]]|
|[[Mouchtaris, Athanasios|AUTHOR Athanasios Mouchtaris]]|
|[[Moulin, Pierre|AUTHOR Pierre Moulin]]|
|[[Mower Provost, Emily|AUTHOR Emily Mower Provost]]|
|[[Mukherjee, Arijit|AUTHOR Arijit Mukherjee]]|
|[[Mulder, Kimberley|AUTHOR Kimberley Mulder]]|
|[[Mulholland, Matthew|AUTHOR Matthew Mulholland]]|
|[[Mullally, Niall|AUTHOR Niall Mullally]]|
|[[Müller, Robert|AUTHOR Robert Müller]]|
|[[Mumtaz, Benazir|AUTHOR Benazir Mumtaz]]|
|[[Mun, Seongkyu|AUTHOR Seongkyu Mun]]|
|[[Mun, Sung Hwan|AUTHOR Sung Hwan Mun]]|
|[[Murali, Vignesh|AUTHOR Vignesh Murali]]|
|[[Murthy, B.H.V.S. Narayana|AUTHOR B.H.V.S. Narayana Murthy]]|
|[[Murthy, Hema A.|AUTHOR Hema A. Murthy]]|
|[[Myer, Samuel|AUTHOR Samuel Myer]]|
|[[Mysore, Gautham J.|AUTHOR Gautham J. Mysore]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[N., Krishna D.|AUTHOR Krishna D. N.]]|
|[[Na, Yueyue|AUTHOR Yueyue Na]]|
|[[Naderi, Babak|AUTHOR Babak Naderi]]|
|[[Nagamatsu, Kenji|AUTHOR Kenji Nagamatsu]]|
|[[Nagrani, Arsha|AUTHOR Arsha Nagrani]]|
|[[Naik, Devang|AUTHOR Devang Naik]]|
|[[Naini, Abinay Reddy|AUTHOR Abinay Reddy Naini]]|
|[[Nakagawa, Takashi|AUTHOR Takashi Nakagawa]]|
|[[Nakagome, Yu|AUTHOR Yu Nakagome]]|
|[[Nakamura, Satoshi|AUTHOR Satoshi Nakamura]]|
|[[Nakanishi, Noriko|AUTHOR Noriko Nakanishi]]|
|[[Nakashika, Toru|AUTHOR Toru Nakashika]]|
|[[Nakatani, Tomohiro|AUTHOR Tomohiro Nakatani]]|
|[[Nallanthighal, Venkata Srikanth|AUTHOR Venkata Srikanth Nallanthighal]]|
|[[Nam, Kihyun|AUTHOR Kihyun Nam]]|
|[[Namboodiri, Vinay P.|AUTHOR Vinay P. Namboodiri]]|
|[[Nanayakkara, Suranga|AUTHOR Suranga Nanayakkara]]|
|[[Nankaku, Yoshihiko|AUTHOR Yoshihiko Nankaku]]|
|[[Narayanan, Arun|AUTHOR Arun Narayanan]]|
|[[Narayanan, Shrikanth|AUTHOR Shrikanth Narayanan]]|
|[[Narayanaswamy, Vivek|AUTHOR Vivek Narayanaswamy]]|
|[[Nathwani, Karan|AUTHOR Karan Nathwani]]|
|[[Nautsch, Andreas|AUTHOR Andreas Nautsch]]|
|[[Navya, A.|AUTHOR A. Navya]]|
|[[Nechaev, Yaroslav|AUTHOR Yaroslav Nechaev]]|
|[[Negri, Matteo|AUTHOR Matteo Negri]]|
|[[Neitsch, Jana|AUTHOR Jana Neitsch]]|
|[[Nekvinda, Tomáš|AUTHOR Tomáš Nekvinda]]|
|[[Nercessian, Shahan|AUTHOR Shahan Nercessian]]|
|[[Neumann, Michael|AUTHOR Michael Neumann]]|
|[[Nevado-Holgado, Alejo J.|AUTHOR Alejo J. Nevado-Holgado]]|
|[[Ney, Hermann|AUTHOR Hermann Ney]]|
|[[Ng, Cymie Wing-Yee|AUTHOR Cymie Wing-Yee Ng]]|
|[[Ng, Manwa L.|AUTHOR Manwa L. Ng]]|
|[[Ng, Si-Ioi|AUTHOR Si-Ioi Ng]]|
|[[Nguyen, Cameron|AUTHOR Cameron Nguyen]]|
|[[Nguyen, Ha|AUTHOR Ha Nguyen]]|
|[[Nguyen, Hieu Duy|AUTHOR Hieu Duy Nguyen]]|
|[[Nguyen, Quang Minh|AUTHOR Quang Minh Nguyen]]|
|[[Nguyen, Thai Binh|AUTHOR Thai Binh Nguyen]]|
|[[Nguyen, Thai-Son|AUTHOR Thai-Son Nguyen]]|
|[[Nguyen, Thi Thu Hien|AUTHOR Thi Thu Hien Nguyen]]|
|[[Nguyen, Tinh|AUTHOR Tinh Nguyen]]|
|[[Nguyen, Trung Hieu|AUTHOR Trung Hieu Nguyen]]|
|[[Nguyen, Tuan-Nam|AUTHOR Tuan-Nam Nguyen]]|
|[[Ni, Chongjia|AUTHOR Chongjia Ni]]|
|[[Nicolis, Marco|AUTHOR Marco Nicolis]]|
|[[Nicolson, Aaron|AUTHOR Aaron Nicolson]]|
|[[Nie, Shuai|AUTHOR Shuai Nie]]|
|[[Nie, Yuanfei|AUTHOR Yuanfei Nie]]|
|[[Niebuhr, Oliver|AUTHOR Oliver Niebuhr]]|
|[[Niehues, Jan|AUTHOR Jan Niehues]]|
|[[Nika, Marily|AUTHOR Marily Nika]]|
|[[Ning, Jinghong|AUTHOR Jinghong Ning]]|
|[[Niranjan, Abhishek|AUTHOR Abhishek Niranjan]]|
|[[Nishida, Kyosuke|AUTHOR Kyosuke Nishida]]|
|[[Niu, Di|AUTHOR Di Niu]]|
|[[Niu, Sufeng|AUTHOR Sufeng Niu]]|
|[[Niu, Xiaochuan|AUTHOR Xiaochuan Niu]]|
|[[Noé, Paul-Gauthier|AUTHOR Paul-Gauthier Noé]]|
|[[Noh, Hyeong-Rae|AUTHOR Hyeong-Rae Noh]]|
|[[Nortje, Leanne|AUTHOR Leanne Nortje]]|
|[[Nose, Takashi|AUTHOR Takashi Nose]]|
|[[Nöth, Elmar|AUTHOR Elmar Nöth]]|
|[[Novikova, Jekaterina|AUTHOR Jekaterina Novikova]]|
|[[Novitasari, Sashi|AUTHOR Sashi Novitasari]]|
|[[Novoselov, Sergey|AUTHOR Sergey Novoselov]]|
|[[Novotný, Ondřej|AUTHOR Ondvrej Novotný]]|
|[[Nugraha, Aditya Arie|AUTHOR Aditya Arie Nugraha]]|
|[[Nwogu, Ifeoma|AUTHOR Ifeoma Nwogu]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[O, Gyeonghwan|AUTHOR Gyeonghwan O]]|
|[[Oak, Mayuresh Sanjay|AUTHOR Mayuresh Sanjay Oak]]|
|[[O’Brien, Anne|AUTHOR Anne O’Brien]]|
|[[Ochiai, Tsubasa|AUTHOR Tsubasa Ochiai]]|
|[[O’Connor, Matt|AUTHOR Matt O’Connor]]|
|[[Ogawa, Atsunori|AUTHOR Atsunori Ogawa]]|
|[[Ogawa, Tetsuji|AUTHOR Tetsuji Ogawa]]|
|[[Oglic, Dino|AUTHOR Dino Oglic]]|
|[[Oh, Dongsuk|AUTHOR Dongsuk Oh]]|
|[[Ohishi, Yasunori|AUTHOR Yasunori Ohishi]]|
|[[Ohtani, Yamato|AUTHOR Yamato Ohtani]]|
|[[Okabe, Koji|AUTHOR Koji Okabe]]|
|[[Okamoto, Takuma|AUTHOR Takuma Okamoto]]|
|[[Öktem, Alp|AUTHOR Alp Öktem]]|
|[[Olfati, Negar|AUTHOR Negar Olfati]]|
|[[Olvera, Michel|AUTHOR Michel Olvera]]|
|[[Omachi, Motoi|AUTHOR Motoi Omachi]]|
|[[O’Malley, Ronan|AUTHOR Ronan O’Malley]]|
|[[Omologo, Maurizio|AUTHOR Maurizio Omologo]]|
|[[Ondel, Lucas|AUTHOR Lucas Ondel]]|
|[[Onishi, Kotaro|AUTHOR Kotaro Onishi]]|
|[[Oplustil Gallegos, Pilar|AUTHOR Pilar Oplustil Gallegos]]|
|[[O’Regan, Robert|AUTHOR Robert O’Regan]]|
|[[Orihashi, Shota|AUTHOR Shota Orihashi]]|
|[[Orozco-Arroyave, Juan Rafael|AUTHOR Juan Rafael Orozco-Arroyave]]|
|[[Ortega, Alfonso|AUTHOR Alfonso Ortega]]|
|[[Ostendorf, Mari|AUTHOR Mari Ostendorf]]|
|[[Ou, Zhijian|AUTHOR Zhijian Ou]]|
|[[Oura, Keiichiro|AUTHOR Keiichiro Oura]]|
|[[Ouyang, Peng|AUTHOR Peng Ouyang]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Pal, Vaishali|AUTHOR Vaishali Pal]]|
|[[Paliwal, Kuldip K.|AUTHOR Kuldip K. Paliwal]]|
|[[Palkama, Kasperi|AUTHOR Kasperi Palkama]]|
|[[Palmer, Jeffrey|AUTHOR Jeffrey Palmer]]|
|[[Pan, Huashan|AUTHOR Huashan Pan]]|
|[[Pan, Jia|AUTHOR Jia Pan]]|
|[[Pan, Jing|AUTHOR Jing Pan]]|
|[[Pan, Yilin|AUTHOR Yilin Pan]]|
|[[Pan, Zexu|AUTHOR Zexu Pan]]|
|[[Panahi, Issa M.S.|AUTHOR Issa M.S. Panahi]]|
|[[Panchanathan, Sethuraman|AUTHOR Sethuraman Panchanathan]]|
|[[Panda, Ashish|AUTHOR Ashish Panda]]|
|[[Pandey, Ashutosh|AUTHOR Ashutosh Pandey]]|
|[[Pandey, Prabhat|AUTHOR Prabhat Pandey]]|
|[[Pandia D.S., Karthik|AUTHOR Karthik Pandia D.S.]]|
|[[Pang, Ruoming|AUTHOR Ruoming Pang]]|
|[[Pankajakshan, Arjun|AUTHOR Arjun Pankajakshan]]|
|[[Pannala, Vishala|AUTHOR Vishala Pannala]]|
|[[Pantazis, Yannis|AUTHOR Yannis Pantazis]]|
|[[Papadimitriou, Katerina|AUTHOR Katerina Papadimitriou]]|
|[[Papadopoulos, Pavlos|AUTHOR Pavlos Papadopoulos]]|
|[[Papi, Sara|AUTHOR Sara Papi]]|
|[[Pappagari, Raghavendra|AUTHOR Raghavendra Pappagari]]|
|[[Papreja, Piyush|AUTHOR Piyush Papreja]]|
|[[Parada-Cabaleiro, Emilia|AUTHOR Emilia Parada-Cabaleiro]]|
|[[Paramasivam, Periyasamy|AUTHOR Periyasamy Paramasivam]]|
|[[Paraskevopoulos, Georgios|AUTHOR Georgios Paraskevopoulos]]|
|[[Parasu, Prasanth|AUTHOR Prasanth Parasu]]|
|[[Parcollet, Titouan|AUTHOR Titouan Parcollet]]|
|[[Parde, Natalie|AUTHOR Natalie Parde]]|
|[[Pariente, Manuel|AUTHOR Manuel Pariente]]|
|[[Park, Daniel S.|AUTHOR Daniel S. Park]]|
|[[Park, Eunil|AUTHOR Eunil Park]]|
|[[Park, Heayoung|AUTHOR Heayoung Park]]|
|[[Park, Hyoungmin|AUTHOR Hyoungmin Park]]|
|[[Park, Inyoung|AUTHOR Inyoung Park]]|
|[[Park, Jaihyun|AUTHOR Jaihyun Park]]|
|[[Park, Jinhwan|AUTHOR Jinhwan Park]]|
|[[Park, Junmo|AUTHOR Junmo Park]]|
|[[Park, Kyubyong|AUTHOR Kyubyong Park]]|
|[[Park, Sangjun|AUTHOR Sangjun Park]]|
|[[Park, Seung-won|AUTHOR Seung-won Park]]|
|[[Park, Soo Jin|AUTHOR Soo Jin Park]]|
|[[Parkhomenko, Denis|AUTHOR Denis Parkhomenko]]|
|[[Parmar, Niki|AUTHOR Niki Parmar]]|
|[[Parmonangan, Ivan Halim|AUTHOR Ivan Halim Parmonangan]]|
|[[Parrot, Maud|AUTHOR Maud Parrot]]|
|[[Parslow, Nicholas|AUTHOR Nicholas Parslow]]|
|[[Parthasarathy, Sarangarajan|AUTHOR Sarangarajan Parthasarathy]]|
|[[Parthasarathy, Srinivas|AUTHOR Srinivas Parthasarathy]]|
|[[Partridge, Kurt|AUTHOR Kurt Partridge]]|
|[[Pascual, Santiago|AUTHOR Santiago Pascual]]|
|[[Patil, Ankita|AUTHOR Ankita Patil]]|
|[[Patino, Jose|AUTHOR Jose Patino]]|
|[[Paul, Dipjyoti|AUTHOR Dipjyoti Paul]]|
|[[Paulik, Matthias|AUTHOR Matthias Paulik]]|
|[[Pautler, David|AUTHOR David Pautler]]|
|[[Pedersen, Mathias B.|AUTHOR Mathias B. Pedersen]]|
|[[Peiró-Lilja, Alex|AUTHOR Alex Peiró-Lilja]]|
|[[Pekhovsky, Timur|AUTHOR Timur Pekhovsky]]|
|[[Pelecanos, Jason|AUTHOR Jason Pelecanos]]|
|[[Peng, Chao|AUTHOR Chao Peng]]|
|[[Peng, Fuchun|AUTHOR Fuchun Peng]]|
|[[Peng, Gang|AUTHOR Gang Peng]]|
|[[Peng, Junyi|AUTHOR Junyi Peng]]|
|[[Peng, Li|AUTHOR Li Peng]]|
|[[Peng, Renhua|AUTHOR Renhua Peng]]|
|[[Penney, Joshua|AUTHOR Joshua Penney]]|
|[[Perez, Matthew|AUTHOR Matthew Perez]]|
|[[Pérez-Ramón, Rubén|AUTHOR Rubén Pérez-Ramón]]|
|[[Pérez-Toro, Paula Andrea|AUTHOR Paula Andrea Pérez-Toro]]|
|[[Pérez Zarazaga, Pablo|AUTHOR Pablo Pérez Zarazaga]]|
|[[Pernkopf, Franz|AUTHOR Franz Pernkopf]]|
|[[Pernon, Michaela|AUTHOR Michaela Pernon]]|
|[[Perrotin, Olivier|AUTHOR Olivier Perrotin]]|
|[[Peter, Varghese|AUTHOR Varghese Peter]]|
|[[Petridis, Stavros|AUTHOR Stavros Petridis]]|
|[[Peyser, Cal|AUTHOR Cal Peyser]]|
|[[Pfeifenberger, Lukas|AUTHOR Lukas Pfeifenberger]]|
|[[Pham, Minh|AUTHOR Minh Pham]]|
|[[Pham, Ngoc-Quan|AUTHOR Ngoc-Quan Pham]]|
|[[Pham, Van Tung|AUTHOR Van Tung Pham]]|
|[[Phansalkar, Neerad|AUTHOR Neerad Phansalkar]]|
|[[Piaskowski, Karol|AUTHOR Karol Piaskowski]]|
|[[Piérard, Adrien|AUTHOR Adrien Piérard]]|
|[[Pierron, Laurent|AUTHOR Laurent Pierron]]|
|[[Pietquin, Olivier|AUTHOR Olivier Pietquin]]|
|[[Pincus, Nadya|AUTHOR Nadya Pincus]]|
|[[Pino, Juan|AUTHOR Juan Pino]]|
|[[Pinquier, Julien|AUTHOR Julien Pinquier]]|
|[[Pirogova, Elena|AUTHOR Elena Pirogova]]|
|[[Plank, Barbara|AUTHOR Barbara Plank]]|
|[[Plchot, Oldřich|AUTHOR Oldřich Plchot]]|
|[[Póczos, Barnabás|AUTHOR Barnabás Póczos]]|
|[[Poddar, Aayushi|AUTHOR Aayushi Poddar]]|
|[[Podluzhny, Ivan|AUTHOR Ivan Podluzhny]]|
|[[Pollak, Senja|AUTHOR Senja Pollak]]|
|[[Polnik, Bartosz|AUTHOR Bartosz Polnik]]|
|[[Polyak, Adam|AUTHOR Adam Polyak]]|
|[[Pompili, Anna|AUTHOR Anna Pompili]]|
|[[Popov, Vadim|AUTHOR Vadim Popov]]|
|[[Pota, Stéphane|AUTHOR Stéphane Pota]]|
|[[Potamianos, Alexandros|AUTHOR Alexandros Potamianos]]|
|[[Potamianos, Gerasimos|AUTHOR Gerasimos Potamianos]]|
|[[Povey, Daniel|AUTHOR Daniel Povey]]|
|[[Prabhavalkar, Rohit|AUTHOR Rohit Prabhavalkar]]|
|[[Prakash, Anusha|AUTHOR Anusha Prakash]]|
|[[Prasanna, S.R. Mahadeva|AUTHOR S.R. Mahadeva Prasanna]]|
|[[Pratap, Vineel|AUTHOR Vineel Pratap]]|
|[[Predeck, Kristin|AUTHOR Kristin Predeck]]|
|[[Preux, Philippe|AUTHOR Philippe Preux]]|
|[[Prévot, Laurent|AUTHOR Laurent Prévot]]|
|[[Prieto, Santi|AUTHOR Santi Prieto]]|
|[[Prisyach, Tatiana|AUTHOR Tatiana Prisyach]]|
|[[Priyadharshini, Veeramani|AUTHOR Veeramani Priyadharshini]]|
|[[Pu, Songbai|AUTHOR Songbai Pu]]|
|[[Pulugundla, Bhargav|AUTHOR Bhargav Pulugundla]]|
|[[Punjabi, Surabhi|AUTHOR Surabhi Punjabi]]|
|[[Purohit, Tilak|AUTHOR Tilak Purohit]]|
|[[Purushothaman, Anurenjan|AUTHOR Anurenjan Purushothaman]]|
|[[Purver, Matthew|AUTHOR Matthew Purver]]|
|[[Pusch, Arne|AUTHOR Arne Pusch]]|
|[[Pushpavathi, M.|AUTHOR M. Pushpavathi]]|
|[[Pyo, Jaewoo|AUTHOR Jaewoo Pyo]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Qi, Jun|AUTHOR Jun Qi]]|
|[[Qian, Kun|AUTHOR Kun Qian]]|
|[[Qian, Xinyuan|AUTHOR Xinyuan Qian]]|
|[[Qian, Yanmin|AUTHOR Yanmin Qian]]|
|[[Qian, Yao|AUTHOR Yao Qian]]|
|[[Qiang, Chunyu|AUTHOR Chunyu Qiang]]|
|[[Qiao, Tingting|AUTHOR Tingting Qiao]]|
|[[Qin, James|AUTHOR James Qin]]|
|[[Qin, Lei|AUTHOR Lei Qin]]|
|[[Qin, Tao|AUTHOR Tao Qin]]|
|[[Qin, Xiaowei|AUTHOR Xiaowei Qin]]|
|[[Qin, Xiaoyi|AUTHOR Xiaoyi Qin]]|
|[[Qin, Ying|AUTHOR Ying Qin]]|
|[[Qiu, Xinchi|AUTHOR Xinchi Qiu]]|
|[[Qiu, Yuanhang|AUTHOR Yuanhang Qiu]]|
|[[Qiu, Zimeng|AUTHOR Zimeng Qiu]]|
|[[Qu, Leyuan|AUTHOR Leyuan Qu]]|
|[[Qu, Tianshu|AUTHOR Tianshu Qu]]|
|[[Qu, Xiaoyang|AUTHOR Xiaoyang Qu]]|
|[[Quatieri, Thomas F.|AUTHOR Thomas F. Quatieri]]|
|[[Quillot, Mathias|AUTHOR Mathias Quillot]]|
|[[Quintas, Sebastião|AUTHOR Sebastião Quintas]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[R., Hima Jyothi|AUTHOR Hima Jyothi R.]]|
|[[R., Nirmala|AUTHOR Nirmala R.]]|
|[[Radfar, Martin|AUTHOR Martin Radfar]]|
|[[Rafaely, Boaz|AUTHOR Boaz Rafaely]]|
|[[Ragano, Alessandro|AUTHOR Alessandro Ragano]]|
|[[Raikar, Aditya|AUTHOR Aditya Raikar]]|
|[[Raina, Vyas|AUTHOR Vyas Raina]]|
|[[Raissi, Tina|AUTHOR Tina Raissi]]|
|[[Raitio, Tuomo|AUTHOR Tuomo Raitio]]|
|[[Raj, Bhiksha|AUTHOR Bhiksha Raj]]|
|[[Rajan, Padmanabhan|AUTHOR Padmanabhan Rajan]]|
|[[Rajan, Rajeev|AUTHOR Rajeev Rajan]]|
|[[Raju, Anirudh|AUTHOR Anirudh Raju]]|
|[[Ramabhadran, Bhuvana|AUTHOR Bhuvana Ramabhadran]]|
|[[Ramakrishna, Anil|AUTHOR Anil Ramakrishna]]|
|[[Ramakrishnan, Ganesh|AUTHOR Ganesh Ramakrishnan]]|
|[[Ramamurthy, Ranjani|AUTHOR Ranjani Ramamurthy]]|
|[[Ramanarayanan, Vikram|AUTHOR Vikram Ramanarayanan]]|
|[[Ramoji, Shreyas|AUTHOR Shreyas Ramoji]]|
|[[Ramos, Alberto Gil C.P.|AUTHOR Alberto Gil C.P. Ramos]]|
|[[Rana, Puneet|AUTHOR Puneet Rana]]|
|[[Rana, Rajib|AUTHOR Rajib Rana]]|
|[[Rao, Ch.V. Rama|AUTHOR Ch.V. Rama Rao]]|
|[[Rao, Hrishikesh|AUTHOR Hrishikesh Rao]]|
|[[Rao, K. Sreenivasa|AUTHOR K. Sreenivasa Rao]]|
|[[Rao, Milind|AUTHOR Milind Rao]]|
|[[Rao, Preeti|AUTHOR Preeti Rao]]|
|[[Rao, Wei|AUTHOR Wei Rao]]|
|[[Rao M.V., Achuth|AUTHOR Achuth Rao M.V.]]|
|[[Raptis, Spyros|AUTHOR Spyros Raptis]]|
|[[Räsänen, Okko|AUTHOR Okko Räsänen]]|
|[[Rasilo, Heikki|AUTHOR Heikki Rasilo]]|
|[[Rasipuram, Ramya|AUTHOR Ramya Rasipuram]]|
|[[Rasmussen, Morten Højfeldt|AUTHOR Morten Højfeldt Rasmussen]]|
|[[Rastrow, Ariya|AUTHOR Ariya Rastrow]]|
|[[Ravanelli, Mirco|AUTHOR Mirco Ravanelli]]|
|[[Raveh, Eran|AUTHOR Eran Raveh]]|
|[[Ravi, Vijay|AUTHOR Vijay Ravi]]|
|[[Reddy, Chandan K.A.|AUTHOR Chandan K.A. Reddy]]|
|[[Reddy, Pradeep|AUTHOR Pradeep Reddy]]|
|[[Reis, Andrew|AUTHOR Andrew Reis]]|
|[[Ren, Bo|AUTHOR Bo Ren]]|
|[[Ren, Shuo|AUTHOR Shuo Ren]]|
|[[Ren, Yanzhen|AUTHOR Yanzhen Ren]]|
|[[Ren, Yi|AUTHOR Yi Ren]]|
|[[Ren, Yuling|AUTHOR Yuling Ren]]|
|[[Ren, Zhao|AUTHOR Zhao Ren]]|
|[[Renals, Steve|AUTHOR Steve Renals]]|
|[[Renders, Jean-Michel|AUTHOR Jean-Michel Renders]]|
|[[Rennies, Jan|AUTHOR Jan Rennies]]|
|[[Repyevsky, Sergey|AUTHOR Sergey Repyevsky]]|
|[[Reuber, Markus|AUTHOR Markus Reuber]]|
|[[Revow, Michael|AUTHOR Michael Revow]]|
|[[Rhee, Nari|AUTHOR Nari Rhee]]|
|[[Riad, Rachid|AUTHOR Rachid Riad]]|
|[[Richmond, Korin|AUTHOR Korin Richmond]]|
|[[Richter, Julius|AUTHOR Julius Richter]]|
|[[Rigoll, Gerhard|AUTHOR Gerhard Rigoll]]|
|[[Ristea, Nicolae-Cătălin|AUTHOR Nicolae-Cătălin Ristea]]|
|[[Riveiro, Juan Carlos|AUTHOR Juan Carlos Riveiro]]|
|[[Rivera, Clara|AUTHOR Clara Rivera]]|
|[[Rizos, Georgios|AUTHOR Georgios Rizos]]|
|[[Roberts, Angela|AUTHOR Angela Roberts]]|
|[[Roblek, Dominik|AUTHOR Dominik Roblek]]|
|[[Rodehorst, Mike|AUTHOR Mike Rodehorst]]|
|[[Roesler, Oliver|AUTHOR Oliver Roesler]]|
|[[Rohanian, Morteza|AUTHOR Morteza Rohanian]]|
|[[Rohdin, Johan|AUTHOR Johan Rohdin]]|
|[[Rohnke, Jonas|AUTHOR Jonas Rohnke]]|
|[[Rolland, Thomas|AUTHOR Thomas Rolland]]|
|[[Rollwage, Christian|AUTHOR Christian Rollwage]]|
|[[Romana, Amrit|AUTHOR Amrit Romana]]|
|[[Romanenko, Aleksei|AUTHOR Aleksei Romanenko]]|
|[[Romaniuk, Michal|AUTHOR Michal Romaniuk]]|
|[[Ronanki, Srikanth|AUTHOR Srikanth Ronanki]]|
|[[Rondon, Pat|AUTHOR Pat Rondon]]|
|[[Rose, Phil|AUTHOR Phil Rose]]|
|[[Rosenberg, Andrew|AUTHOR Andrew Rosenberg]]|
|[[Rosenkranz, T.|AUTHOR T. Rosenkranz]]|
|[[Ross, Arun|AUTHOR Arun Ross]]|
|[[Rouhe, Aku|AUTHOR Aku Rouhe]]|
|[[Roustay Vishkasougheh, Mehrdad|AUTHOR Mehrdad Roustay Vishkasougheh]]|
|[[Rowe, Hannah P.|AUTHOR Hannah P. Rowe]]|
|[[Rownicka, Joanna|AUTHOR Joanna Rownicka]]|
|[[Roy, Sujan Kumar|AUTHOR Sujan Kumar Roy]]|
|[[Rozenberg, Shai|AUTHOR Shai Rozenberg]]|
|[[Ruan, Min|AUTHOR Min Ruan]]|
|[[Ruan, Weitong|AUTHOR Weitong Ruan]]|
|[[Rudzicz, Frank|AUTHOR Frank Rudzicz]]|
|[[Rybach, David|AUTHOR David Rybach]]|
|[[Rybakov, Oleg|AUTHOR Oleg Rybakov]]|
|[[Rybicka, Magdalena|AUTHOR Magdalena Rybicka]]|
|[[Rykaczewski, Krzysztof|AUTHOR Krzysztof Rykaczewski]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[S., Adharsh|AUTHOR Adharsh S.]]|
|[[Saam, Christian|AUTHOR Christian Saam]]|
|[[Sabu, Kamini|AUTHOR Kamini Sabu]]|
|[[Sabzi Shahrebabaki, Abdolreza|AUTHOR Abdolreza Sabzi Shahrebabaki]]|
|[[Sadekova, Tasnima|AUTHOR Tasnima Sadekova]]|
|[[Sadhu, Samik|AUTHOR Samik Sadhu]]|
|[[Saeb, Armin|AUTHOR Armin Saeb]]|
|[[Saeki, Takaaki|AUTHOR Takaaki Saeki]]|
|[[Sáez-Trigueros, Daniel|AUTHOR Daniel Sáez-Trigueros]]|
|[[Safari, Pooyan|AUTHOR Pooyan Safari]]|
|[[Sager, Jacob|AUTHOR Jacob Sager]]|
|[[Saglam, Mert|AUTHOR Mert Saglam]]|
|[[Sagot, Benoît|AUTHOR Benoît Sagot]]|
|[[Saha, Pramit|AUTHOR Pramit Saha]]|
|[[Sahidullah, Md.|AUTHOR Md. Sahidullah]]|
|[[Sailor, Hardik B.|AUTHOR Hardik B. Sailor]]|
|[[Sainath, Tara N.|AUTHOR Tara N. Sainath]]|
|[[Saito, Daisuke|AUTHOR Daisuke Saito]]|
|[[Saito, Shoichiro|AUTHOR Shoichiro Saito]]|
|[[Saito, Yuki|AUTHOR Yuki Saito]]|
|[[Sak, Hasim|AUTHOR Hasim Sak]]|
|[[Sakai, Shinsuke|AUTHOR Shinsuke Sakai]]|
|[[Sakti, Sakriani|AUTHOR Sakriani Sakti]]|
|[[Salah, Albert Ali|AUTHOR Albert Ali Salah]]|
|[[Salameh, Mohammad|AUTHOR Mohammad Salameh]]|
|[[Salami Kavaki, Hassan|AUTHOR Hassan Salami Kavaki]]|
|[[Salamon, Justin|AUTHOR Justin Salamon]]|
|[[Salesky, Elizabeth|AUTHOR Elizabeth Salesky]]|
|[[Salvi, Giampiero|AUTHOR Giampiero Salvi]]|
|[[Samarasinghe, Prasanga N.|AUTHOR Prasanga N. Samarasinghe]]|
|[[Samih, Younes|AUTHOR Younes Samih]]|
|[[Samlan, Robin|AUTHOR Robin Samlan]]|
|[[Sanchis, Albert|AUTHOR Albert Sanchis]]|
|[[Sang, Mufan|AUTHOR Mufan Sang]]|
|[[Sangwan, Abhijeet|AUTHOR Abhijeet Sangwan]]|
|[[Sankaranarayanan, Karthik|AUTHOR Karthik Sankaranarayanan]]|
|[[Santos, Israel|AUTHOR Israel Santos]]|
|[[Santos, Jônatas|AUTHOR Jônatas Santos]]|
|[[Saon, George|AUTHOR George Saon]]|
|[[Sapru, Ashtosh|AUTHOR Ashtosh Sapru]]|
|[[Saraclar, Murat|AUTHOR Murat Saraclar]]|
|[[Saraf, Yatharth|AUTHOR Yatharth Saraf]]|
|[[Saranu, Kinnera|AUTHOR Kinnera Saranu]]|
|[[Sarawagi, Sunita|AUTHOR Sunita Sarawagi]]|
|[[Sarawgi, Utkarsh|AUTHOR Utkarsh Sarawgi]]|
|[[Sardana, Ashish|AUTHOR Ashish Sardana]]|
|[[Sarfjoo, Seyyed Saeed|AUTHOR Seyyed Saeed Sarfjoo]]|
|[[Sarı, Leda|AUTHOR Leda Sarı]]|
|[[Sarian, Melina|AUTHOR Melina Sarian]]|
|[[Sarma, Raghava|AUTHOR Raghava Sarma]]|
|[[Sarmah, Priyankoo|AUTHOR Priyankoo Sarmah]]|
|[[Saruwatari, Hiroshi|AUTHOR Hiroshi Saruwatari]]|
|[[Sato, Hiroshi|AUTHOR Hiroshi Sato]]|
|[[Sato, Yoshinao|AUTHOR Yoshinao Sato]]|
|[[Satpal, Sandeepkumar|AUTHOR Sandeepkumar Satpal]]|
|[[Satyanarayana, J.V.|AUTHOR J.V. Satyanarayana]]|
|[[Satyapriya, Malla|AUTHOR Malla Satyapriya]]|
|[[Saunders, Kate E.A.|AUTHOR Kate E.A. Saunders]]|
|[[Saunshikar, Ajit Ashok|AUTHOR Ajit Ashok Saunshikar]]|
|[[Sawada, Hiroshi|AUTHOR Hiroshi Sawada]]|
|[[Sawada, Kei|AUTHOR Kei Sawada]]|
|[[Sawhney, Ramit|AUTHOR Ramit Sawhney]]|
|[[Saxon, Michael|AUTHOR Michael Saxon]]|
|[[Scarborough, Rebecca|AUTHOR Rebecca Scarborough]]|
|[[Schädler, Marc René|AUTHOR Marc René Schädler]]|
|[[Scharenborg, Odette|AUTHOR Odette Scharenborg]]|
|[[Scheck, Kevin|AUTHOR Kevin Scheck]]|
|[[Scheibler, Robin|AUTHOR Robin Scheibler]]|
|[[Schepker, Henning|AUTHOR Henning Schepker]]|
|[[Schlüter, Ralf|AUTHOR Ralf Schlüter]]|
|[[Schmalenstroeer, Joerg|AUTHOR Joerg Schmalenstroeer]]|
|[[Schmidhuber, Jürgen|AUTHOR Jürgen Schmidhuber]]|
|[[Schmidt, Gerhard|AUTHOR Gerhard Schmidt]]|
|[[Schmitt, Manuel|AUTHOR Manuel Schmitt]]|
|[[Schmitt, Maximilian|AUTHOR Maximilian Schmitt]]|
|[[Schnelle-Walka, Dirk|AUTHOR Dirk Schnelle-Walka]]|
|[[Schnieder, Sebastian|AUTHOR Sebastian Schnieder]]|
|[[Schönherr, Lea|AUTHOR Lea Schönherr]]|
|[[Schröter, H.|AUTHOR H. Schröter]]|
|[[Schuller, Björn W.|AUTHOR Björn W. Schuller]]|
|[[Schuller, Gerald|AUTHOR Gerald Schuller]]|
|[[Schultz, Tanja|AUTHOR Tanja Schultz]]|
|[[Schuppler, Barbara|AUTHOR Barbara Schuppler]]|
|[[Searle, Thomas|AUTHOR Thomas Searle]]|
|[[Sedlmeier, Andreas|AUTHOR Andreas Sedlmeier]]|
|[[Seigel, Matt|AUTHOR Matt Seigel]]|
|[[Seki, Hiroshi|AUTHOR Hiroshi Seki]]|
|[[Seki, Shogo|AUTHOR Shogo Seki]]|
|[[Sekiguchi, Kouhei|AUTHOR Kouhei Sekiguchi]]|
|[[Seltzer, Michael L.|AUTHOR Michael L. Seltzer]]|
|[[Sénéchal, Thibaud|AUTHOR Thibaud Sénéchal]]|
|[[Seneviratne, Nadee|AUTHOR Nadee Seneviratne]]|
|[[Seo, Jae-sun|AUTHOR Jae-sun Seo]]|
|[[Setlur, Amrith|AUTHOR Amrith Setlur]]|
|[[Settle, Shane|AUTHOR Shane Settle]]|
|[[Seurin, Mathieu|AUTHOR Mathieu Seurin]]|
|[[Sha, Yongtao|AUTHOR Yongtao Sha]]|
|[[Shah, Aishanee|AUTHOR Aishanee Shah]]|
|[[Shah, Anish|AUTHOR Anish Shah]]|
|[[Shah, Rajiv Ratn|AUTHOR Rajiv Ratn Shah]]|
|[[Shahin, Mostafa|AUTHOR Mostafa Shahin]]|
|[[Shahnawazuddin, S.|AUTHOR S. Shahnawazuddin]]|
|[[Shaik, M. Ali Basha|AUTHOR M. Ali Basha Shaik]]|
|[[Shang, Lifeng|AUTHOR Lifeng Shang]]|
|[[Shangguan, Yuan|AUTHOR Yuan Shangguan]]|
|[[Shankar, Nikhil|AUTHOR Nikhil Shankar]]|
|[[Shankar, Ravi|AUTHOR Ravi Shankar]]|
|[[Shao, Yiwen|AUTHOR Yiwen Shao]]|
|[[Shapiro, Joshua|AUTHOR Joshua Shapiro]]|
|[[Sharifzadeh, Hamid|AUTHOR Hamid Sharifzadeh]]|
|[[Sharma, Jivitesh|AUTHOR Jivitesh Sharma]]|
|[[Sharma, Manish|AUTHOR Manish Sharma]]|
|[[Sharma, Neeraj|AUTHOR Neeraj Sharma]]|
|[[Sharma, Yash|AUTHOR Yash Sharma]]|
|[[Sharon, Rini A.|AUTHOR Rini A. Sharon]]|
|[[Shavitt, Ira|AUTHOR Ira Shavitt]]|
|[[Shaw, Ian|AUTHOR Ian Shaw]]|
|[[Shechtman, Slava|AUTHOR Slava Shechtman]]|
|[[Sheikh, Imran|AUTHOR Imran Sheikh]]|
|[[Shen, Guang|AUTHOR Guang Shen]]|
|[[Shen, Peng|AUTHOR Peng Shen]]|
|[[Shen, Shengmei|AUTHOR Shengmei Shen]]|
|[[Shen, Yi|AUTHOR Yi Shen]]|
|[[Shen, Yilin|AUTHOR Yilin Shen]]|
|[[Shi, Bowen|AUTHOR Bowen Shi]]|
|[[Shi, Hao|AUTHOR Hao Shi]]|
|[[Shi, Jiatong|AUTHOR Jiatong Shi]]|
|[[Shi, Jing|AUTHOR Jing Shi]]|
|[[Shi, Ke|AUTHOR Ke Shi]]|
|[[Shi, Liming|AUTHOR Liming Shi]]|
|[[Shi, Qian|AUTHOR Qian Shi]]|
|[[Shi, Xiaohan|AUTHOR Xiaohan Shi]]|
|[[Shi, Yangyang|AUTHOR Yangyang Shi]]|
|[[Shi, Yanpei|AUTHOR Yanpei Shi]]|
|[[Shi, Yu|AUTHOR Yu Shi]]|
|[[Shi, Ziqiang|AUTHOR Ziqiang Shi]]|
|[[Shiarella, Alexander|AUTHOR Alexander Shiarella]]|
|[[Shifas, Muhammed P.V.|AUTHOR Muhammed P.V. Shifas]]|
|[[Shih, Jerry|AUTHOR Jerry Shih]]|
|[[Shim, Hye-jin|AUTHOR Hye-jin Shim]]|
|[[Shimodaira, Hiroshi|AUTHOR Hiroshi Shimodaira]]|
|[[Shinoda, Koichi|AUTHOR Koichi Shinoda]]|
|[[Shinohara, Yusuke|AUTHOR Yusuke Shinohara]]|
|[[Shinozaki, Takahiro|AUTHOR Takahiro Shinozaki]]|
|[[Shirahata, Yuma|AUTHOR Yuma Shirahata]]|
|[[Shivkumar, Abhishek|AUTHOR Abhishek Shivkumar]]|
|[[Sholokhov, Alexey|AUTHOR Alexey Sholokhov]]|
|[[Shon, Suwon|AUTHOR Suwon Shon]]|
|[[Shor, Joel|AUTHOR Joel Shor]]|
|[[Shrivastava, Manish|AUTHOR Manish Shrivastava]]|
|[[Shulipa, Andrey|AUTHOR Andrey Shulipa]]|
|[[Shum, Stephen|AUTHOR Stephen Shum]]|
|[[Sigtia, Siddharth|AUTHOR Siddharth Sigtia]]|
|[[Silamu, Wushour|AUTHOR Wushour Silamu]]|
|[[Silnova, Anna|AUTHOR Anna Silnova]]|
|[[Silvestre-Cerdà, Joan Albert|AUTHOR Joan Albert Silvestre-Cerdà]]|
|[[Simantiraki, Olympia|AUTHOR Olympia Simantiraki]]|
|[[Simha, Pramod|AUTHOR Pramod Simha]]|
|[[Singh, Abhayjeet|AUTHOR Abhayjeet Singh]]|
|[[Singh, Kritika|AUTHOR Kritika Singh]]|
|[[Singh, Maneesh|AUTHOR Maneesh Singh]]|
|[[Singh, Mittul|AUTHOR Mittul Singh]]|
|[[Singh, Pankaj|AUTHOR Pankaj Singh]]|
|[[Singh, Prachi|AUTHOR Prachi Singh]]|
|[[Singh, Rita|AUTHOR Rita Singh]]|
|[[Singh, Sachin|AUTHOR Sachin Singh]]|
|[[Singh, Shatrughan|AUTHOR Shatrughan Singh]]|
|[[Siniscalchi, Sabato Marco|AUTHOR Sabato Marco Siniscalchi]]|
|[[Siriwardhana, Shamane|AUTHOR Shamane Siriwardhana]]|
|[[Sisman, Berrak|AUTHOR Berrak Sisman]]|
|[[Sivaraman, Aswin|AUTHOR Aswin Sivaraman]]|
|[[Sivasankaran, Sunit|AUTHOR Sunit Sivasankaran]]|
|[[Sklyar, Ilya|AUTHOR Ilya Sklyar]]|
|[[Skoglund, Jan|AUTHOR Jan Skoglund]]|
|[[Slizovskaia, Olga|AUTHOR Olga Slizovskaia]]|
|[[Smeele, Simeon|AUTHOR Simeon Smeele]]|
|[[Smith, Melissa C.|AUTHOR Melissa C. Smith]]|
|[[Socher, Richard|AUTHOR Richard Socher]]|
|[[Søgaard, Anders|AUTHOR Anders Søgaard]]|
|[[Soğancıoğlu, Gizem|AUTHOR Gizem Soğancıoğlu]]|
|[[Sogi, Naoya|AUTHOR Naoya Sogi]]|
|[[Solera-Ureña, Rubén|AUTHOR Rubén Solera-Ureña]]|
|[[Soliman, Nouran|AUTHOR Nouran Soliman]]|
|[[Soman, Akshara|AUTHOR Akshara Soman]]|
|[[Song, Binheng|AUTHOR Binheng Song]]|
|[[Song, Dandan|AUTHOR Dandan Song]]|
|[[Song, Eunwoo|AUTHOR Eunwoo Song]]|
|[[Song, Hao|AUTHOR Hao Song]]|
|[[Song, Hongtao|AUTHOR Hongtao Song]]|
|[[Song, Hui|AUTHOR Hui Song]]|
|[[Song, Liming|AUTHOR Liming Song]]|
|[[Song, Meishu|AUTHOR Meishu Song]]|
|[[Song, Wei|AUTHOR Wei Song]]|
|[[Song, Xingchen|AUTHOR Xingchen Song]]|
|[[Song, Yale|AUTHOR Yale Song]]|
|[[Song, Yan|AUTHOR Yan Song]]|
|[[Song, Yonghao|AUTHOR Yonghao Song]]|
|[[Soong, Frank K.|AUTHOR Frank K. Soong]]|
|[[Sorin, Alexander|AUTHOR Alexander Sorin]]|
|[[Sorokin, Ivan|AUTHOR Ivan Sorokin]]|
|[[Souza, Lincon S.|AUTHOR Lincon S. Souza]]|
|[[Spanakis, Gerasimos|AUTHOR Gerasimos Spanakis]]|
|[[Spanias, Andreas|AUTHOR Andreas Spanias]]|
|[[Spinelli, Elsa|AUTHOR Elsa Spinelli]]|
|[[Spinu, Laura|AUTHOR Laura Spinu]]|
|[[Squartini, Stefano|AUTHOR Stefano Squartini]]|
|[[Sreedevi, N.|AUTHOR N. Sreedevi]]|
|[[Sreeram, Anirudh|AUTHOR Anirudh Sreeram]]|
|[[Sridhar, Kusha|AUTHOR Kusha Sridhar]]|
|[[Srinivasan, Sriram|AUTHOR Sriram Srinivasan]]|
|[[Sriram, Anuroop|AUTHOR Anuroop Sriram]]|
|[[Sriskandaraja, Kaavya|AUTHOR Kaavya Sriskandaraja]]|
|[[Srivastava, Brij Mohan Lal|AUTHOR Brij Mohan Lal Srivastava]]|
|[[Srivastava, Sudhanshu|AUTHOR Sudhanshu Srivastava]]|
|[[Staib, Marlene|AUTHOR Marlene Staib]]|
|[[Stamenovic, Marko|AUTHOR Marko Stamenovic]]|
|[[Stan, Adriana|AUTHOR Adriana Stan]]|
|[[Stappen, Lukas|AUTHOR Lukas Stappen]]|
|[[Stasak, Brian|AUTHOR Brian Stasak]]|
|[[Stein, Noah D.|AUTHOR Noah D. Stein]]|
|[[Steiner, Ingmar|AUTHOR Ingmar Steiner]]|
|[[Stepanović, Marija|AUTHOR Marija Stepanović]]|
|[[Stephenson, Brooke|AUTHOR Brooke Stephenson]]|
|[[Stern, Richard M.|AUTHOR Richard M. Stern]]|
|[[Sterpu, George|AUTHOR George Sterpu]]|
|[[Stoakes, Hywel|AUTHOR Hywel Stoakes]]|
|[[Stoimenov, Emilian|AUTHOR Emilian Stoimenov]]|
|[[Stolcke, Andreas|AUTHOR Andreas Stolcke]]|
|[[Stöter, Fabian-Robert|AUTHOR Fabian-Robert Stöter]]|
|[[Strake, Maximilian|AUTHOR Maximilian Strake]]|
|[[Strik, Helmer|AUTHOR Helmer Strik]]|
|[[Strimel, Grant P.|AUTHOR Grant P. Strimel]]|
|[[Strohman, Trevor|AUTHOR Trevor Strohman]]|
|[[Strub, Florian|AUTHOR Florian Strub]]|
|[[Stüker, Sebastian|AUTHOR Sebastian Stüker]]|
|[[Sturim, Douglas|AUTHOR Douglas Sturim]]|
|[[Stylianou, Yannis|AUTHOR Yannis Stylianou]]|
|[[Su, Bo-Hao|AUTHOR Bo-Hao Su]]|
|[[Su, Chengwei|AUTHOR Chengwei Su]]|
|[[Su, Dan|AUTHOR Dan Su]]|
|[[Su, Enze|AUTHOR Enze Su]]|
|[[Su, Jiaqi|AUTHOR Jiaqi Su]]|
|[[Su, Rongfeng|AUTHOR Rongfeng Su]]|
|[[Su, Xiangdong|AUTHOR Xiangdong Su]]|
|[[Su, Zixiong|AUTHOR Zixiong Su]]|
|[[Subrahmanya, Niranjan|AUTHOR Niranjan Subrahmanya]]|
|[[Subramanian, Aswin Shanmugam|AUTHOR Aswin Shanmugam Subramanian]]|
|[[Subramanian, Vinod|AUTHOR Vinod Subramanian]]|
|[[Suda, Hitoshi|AUTHOR Hitoshi Suda]]|
|[[Suendermann-Oeft, David|AUTHOR David Suendermann-Oeft]]|
|[[Suhaimi, Nur Farah Ain|AUTHOR Nur Farah Ain Suhaimi]]|
|[[Sun, Eric|AUTHOR Eric Sun]]|
|[[Sun, Hao|AUTHOR Hao Sun]]|
|[[Sun, Jianqing|AUTHOR Jianqing Sun]]|
|[[Sun, Lei|AUTHOR Lei Sun]]|
|[[Sun, Ming|AUTHOR Ming Sun]]|
|[[Sundaram, Shiva|AUTHOR Shiva Sundaram]]|
|[[Sung, June Sig|AUTHOR June Sig Sung]]|
|[[Sung, Nako|AUTHOR Nako Sung]]|
|[[Sung, Wonyong|AUTHOR Wonyong Sung]]|
|[[Sunkara, Monica|AUTHOR Monica Sunkara]]|
|[[Suo, Hongbin|AUTHOR Hongbin Suo]]|
|[[Suthokumar, Gajan|AUTHOR Gajan Suthokumar]]|
|[[Suzuki, Masayuki|AUTHOR Masayuki Suzuki]]|
|[[Svendsen, Torbjørn|AUTHOR Torbjørn Svendsen]]|
|[[Syed, Muhammad Shehram Shah|AUTHOR Muhammad Shehram Shah Syed]]|
|[[Syed, Zafi Sherhan|AUTHOR Zafi Sherhan Syed]]|
|[[Synnaeve, Gabriel|AUTHOR Gabriel Synnaeve]]|
|[[Szakay, Anita|AUTHOR Anita Szakay]]|
|[[Szalóki, Szilvia|AUTHOR Szilvia Szalóki]]|
|[[Szendi, István|AUTHOR István Szendi]]|
|[[Szep, Jeno|AUTHOR Jeno Szep]]|
|[[Szymański, Piotr|AUTHOR Piotr Szymański]]|
|[[Szymczak, Adrian|AUTHOR Adrian Szymczak]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Tachbelie, Martha Yifiru|AUTHOR Martha Yifiru Tachbelie]]|
|[[Tachibana, Kentaro|AUTHOR Kentaro Tachibana]]|
|[[Tagliasacchi, Marco|AUTHOR Marco Tagliasacchi]]|
|[[Taigman, Yaniv|AUTHOR Yaniv Taigman]]|
|[[Tak, Hemlata|AUTHOR Hemlata Tak]]|
|[[Takada, Moe|AUTHOR Moe Takada]]|
|[[Takamichi, Shinnosuke|AUTHOR Shinnosuke Takamichi]]|
|[[Takashima, Akihiko|AUTHOR Akihiko Takashima]]|
|[[Takashima, Ryo|AUTHOR Ryo Takashima]]|
|[[Takashima, Ryoichi|AUTHOR Ryoichi Takashima]]|
|[[Takashima, Yuki|AUTHOR Yuki Takashima]]|
|[[Takeda, Kazuya|AUTHOR Kazuya Takeda]]|
|[[Takeda, Ryu|AUTHOR Ryu Takeda]]|
|[[Takeuchi, Hirotoshi|AUTHOR Hirotoshi Takeuchi]]|
|[[Takiguchi, Tetsuya|AUTHOR Tetsuya Takiguchi]]|
|[[Talkar, Tanya|AUTHOR Tanya Talkar]]|
|[[Tan, Chin-Tuan|AUTHOR Chin-Tuan Tan]]|
|[[Tan, Diana|AUTHOR Diana Tan]]|
|[[Tan, Kye Min|AUTHOR Kye Min Tan]]|
|[[Tan, Xianlong|AUTHOR Xianlong Tan]]|
|[[Tan, Xu|AUTHOR Xu Tan]]|
|[[Tan, Zheng-Hua|AUTHOR Zheng-Hua Tan]]|
|[[Tan, Zhili|AUTHOR Zhili Tan]]|
|[[Tanaka, Hiroki|AUTHOR Hiroki Tanaka]]|
|[[Tanaka, Kou|AUTHOR Kou Tanaka]]|
|[[Tanaka, Tomohiro|AUTHOR Tomohiro Tanaka]]|
|[[Taneja, Karan|AUTHOR Karan Taneja]]|
|[[Tang, Hao|AUTHOR Hao Tang]]|
|[[Tang, Kevin|AUTHOR Kevin Tang]]|
|[[Tang, Ping|AUTHOR Ping Tang]]|
|[[Tang, Yun|AUTHOR Yun Tang]]|
|[[Tang, Zhiyuan|AUTHOR Zhiyuan Tang]]|
|[[Tao, Fei|AUTHOR Fei Tao]]|
|[[Tao, Fuxiang|AUTHOR Fuxiang Tao]]|
|[[Tao, Jianhua|AUTHOR Jianhua Tao]]|
|[[Tao, Ruijie|AUTHOR Ruijie Tao]]|
|[[Tawara, Naohiro|AUTHOR Naohiro Tawara]]|
|[[Taylor, Jason|AUTHOR Jason Taylor]]|
|[[Taylor, Niall|AUTHOR Niall Taylor]]|
|[[Teh, Tian Huey|AUTHOR Tian Huey Teh]]|
|[[Teixeira, Francisco|AUTHOR Francisco Teixeira]]|
|[[ten Bosch, Louis|AUTHOR Louis ten Bosch]]|
|[[Teplansky, Kristin J.|AUTHOR Kristin J. Teplansky]]|
|[[Thai, Ngoc Thuy Huong Helen|AUTHOR Ngoc Thuy Huong Helen Thai]]|
|[[Thiagarajan, Jayaraman J.|AUTHOR Jayaraman J. Thiagarajan]]|
|[[Thienpondt, Jenthe|AUTHOR Jenthe Thienpondt]]|
|[[Thierry, Chaminade|AUTHOR Chaminade Thierry]]|
|[[Thomas, Samuel|AUTHOR Samuel Thomas]]|
|[[Tian, Biao|AUTHOR Biao Tian]]|
|[[Tian, Qiao|AUTHOR Qiao Tian]]|
|[[Tian, Xiaohai|AUTHOR Xiaohai Tian]]|
|[[Tian, Yusheng|AUTHOR Yusheng Tian]]|
|[[Tian, Zhengkun|AUTHOR Zhengkun Tian]]|
|[[Timofeeva, Tatiana|AUTHOR Tatiana Timofeeva]]|
|[[Tinkler, Morgan|AUTHOR Morgan Tinkler]]|
|[[Tirry, Wouter|AUTHOR Wouter Tirry]]|
|[[Titeux, Hadrien|AUTHOR Hadrien Titeux]]|
|[[Tits, Noé|AUTHOR Noé Tits]]|
|[[Tiwari, Gautam|AUTHOR Gautam Tiwari]]|
|[[Tiwari, Nitya|AUTHOR Nitya Tiwari]]|
|[[Tjaden, Kris|AUTHOR Kris Tjaden]]|
|[[Tjandra, Andros|AUTHOR Andros Tjandra]]|
|[[T.K., Prakash|AUTHOR Prakash T.K.]]|
|[[Toda, Tomoki|AUTHOR Tomoki Toda]]|
|[[Todisco, Massimiliano|AUTHOR Massimiliano Todisco]]|
|[[Togami, Masahito|AUTHOR Masahito Togami]]|
|[[Togneri, Roberto|AUTHOR Roberto Togneri]]|
|[[Tokuda, Keiichi|AUTHOR Keiichi Tokuda]]|
|[[Tomar, Vikrant Singh|AUTHOR Vikrant Singh Tomar]]|
|[[Tomasello, Paden|AUTHOR Paden Tomasello]]|
|[[Tomashenko, N.|AUTHOR N. Tomashenko]]|
|[[Tommasi, Marc|AUTHOR Marc Tommasi]]|
|[[Tong, Han|AUTHOR Han Tong]]|
|[[Tong, Michael Chi-Fai|AUTHOR Michael Chi-Fai Tong]]|
|[[Tong, Sibo|AUTHOR Sibo Tong]]|
|[[Tong, Ying|AUTHOR Ying Tong]]|
|[[Torres, Catalina|AUTHOR Catalina Torres]]|
|[[Torresquintero, Alexandra|AUTHOR Alexandra Torresquintero]]|
|[[Tóth, László|AUTHOR László Tóth]]|
|[[Tourbabin, Vladimir|AUTHOR Vladimir Tourbabin]]|
|[[Tran, Duc Chung|AUTHOR Duc Chung Tran]]|
|[[Tran, Dung N.|AUTHOR Dung N. Tran]]|
|[[Tran, Trang|AUTHOR Trang Tran]]|
|[[Trancoso, Isabel|AUTHOR Isabel Trancoso]]|
|[[Tran Ngoc, Anaïs|AUTHOR Anaïs Tran Ngoc]]|
|[[Trentin, Edmondo|AUTHOR Edmondo Trentin]]|
|[[Triantafyllopoulos, Andreas|AUTHOR Andreas Triantafyllopoulos]]|
|[[Triefenbach, Fabian|AUTHOR Fabian Triefenbach]]|
|[[Trinh, Viet Anh|AUTHOR Viet Anh Trinh]]|
|[[Tripathi, Anshuman|AUTHOR Anshuman Tripathi]]|
|[[Trmal, Jan|AUTHOR Jan Trmal]]|
|[[Tsao, Feng-Ming|AUTHOR Feng-Ming Tsao]]|
|[[Tsao, Yu|AUTHOR Yu Tsao]]|
|[[Tsiakoulis, Pirros|AUTHOR Pirros Tsiakoulis]]|
|[[Tsuboi, Kazuna|AUTHOR Kazuna Tsuboi]]|
|[[Tsukada, Kimiko|AUTHOR Kimiko Tsukada]]|
|[[Tsukamoto, Shin|AUTHOR Shin Tsukamoto]]|
|[[Tsunematsu, Kazuki|AUTHOR Kazuki Tsunematsu]]|
|[[Tu, Jung-Yueh|AUTHOR Jung-Yueh Tu]]|
|[[Tu, Tao|AUTHOR Tao Tu]]|
|[[Tu, Wei-Wei|AUTHOR Wei-Wei Tu]]|
|[[Tu, Yan-Hui|AUTHOR Yan-Hui Tu]]|
|[[Tu, Zehai|AUTHOR Zehai Tu]]|
|[[Tu, Zhengzheng|AUTHOR Zhengzheng Tu]]|
|[[Túbọ̀sún, Kọ́lá|AUTHOR Kọ́lá Túbọ̀sún]]|
|[[Tulsiani, Hitesh|AUTHOR Hitesh Tulsiani]]|
|[[Tuninetti, Alba|AUTHOR Alba Tuninetti]]|
|[[Tuo, Deyi|AUTHOR Deyi Tuo]]|
|[[Tur, Gokhan|AUTHOR Gokhan Tur]]|
|[[Turan, M.A. Tuğtekin|AUTHOR M.A. Tuğtekin Turan]]|
|[[Turchi, Marco|AUTHOR Marco Turchi]]|
|[[Tüske, Zoltán|AUTHOR Zoltán Tüske]]|
|[[Tuval, Omry|AUTHOR Omry Tuval]]|
|[[Tyagi, Shubhi|AUTHOR Shubhi Tyagi]]|
|[[Tyler, Michael D.|AUTHOR Michael D. Tyler]]|
|[[Tzinis, Efthymios|AUTHOR Efthymios Tzinis]]|
|[[Tzirakis, Panagiotis|AUTHOR Panagiotis Tzirakis]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Udayakumar, Tejas|AUTHOR Tejas Udayakumar]]|
|[[Ueno, Sei|AUTHOR Sei Ueno]]|
|[[Unoki, Masashi|AUTHOR Masashi Unoki]]|
|[[Upadhyay, Shreya G.|AUTHOR Shreya G. Upadhyay]]|
|[[Upton, Emily|AUTHOR Emily Upton]]|
|[[Uys, Pieter|AUTHOR Pieter Uys]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Vadisetti, Gowtham P.|AUTHOR Gowtham P. Vadisetti]]|
|[[Vainer, Jan|AUTHOR Jan Vainer]]|
|[[Valentini-Botinhao, Cassia|AUTHOR Cassia Valentini-Botinhao]]|
|[[Valin, Jean-Marc|AUTHOR Jean-Marc Valin]]|
|[[Valk, Jörgen|AUTHOR Jörgen Valk]]|
|[[Vamvoukakis, Georgios|AUTHOR Georgios Vamvoukakis]]|
|[[van Dalen, Rogier|AUTHOR Rogier van Dalen]]|
|[[van den Brekel, Michiel|AUTHOR Michiel van den Brekel]]|
|[[Van Den Broucke, Arthur|AUTHOR Arthur Van Den Broucke]]|
|[[van den Oord, Aaron|AUTHOR Aaron van den Oord]]|
|[[van der Hout, Justin|AUTHOR Justin van der Hout]]|
|[[Vanee, Kelly|AUTHOR Kelly Vanee]]|
|[[van Heerden, Charl|AUTHOR Charl van Heerden]]|
|[[van Hout, R.W.N.M.|AUTHOR R.W.N.M. van Hout]]|
|[[van Niekerk, Benjamin|AUTHOR Benjamin van Niekerk]]|
|[[van Niekerk, Daniel R.|AUTHOR Daniel R. van Niekerk]]|
|[[Van Segbroeck, Maarten|AUTHOR Maarten Van Segbroeck]]|
|[[van Son, Rob|AUTHOR Rob van Son]]|
|[[van Wijngaarden, Adriaan J.|AUTHOR Adriaan J. van Wijngaarden]]|
|[[Varas, David|AUTHOR David Varas]]|
|[[Varnet, Léo|AUTHOR Léo Varnet]]|
|[[Vasilescu, Ioana|AUTHOR Ioana Vasilescu]]|
|[[Vasilita, Mariana|AUTHOR Mariana Vasilita]]|
|[[Vásquez-Correa, Juan Camilo|AUTHOR Juan Camilo Vásquez-Correa]]|
|[[Vauquier, Nathalie|AUTHOR Nathalie Vauquier]]|
|[[Vellu, Rajan|AUTHOR Rajan Vellu]]|
|[[Venkataraman, Archana|AUTHOR Archana Venkataraman]]|
|[[Venneri, Annalena|AUTHOR Annalena Venneri]]|
|[[Verbeek, Jakob|AUTHOR Jakob Verbeek]]|
|[[Vergara-Diaz, Gloria|AUTHOR Gloria Vergara-Diaz]]|
|[[Verhulst, Sarah|AUTHOR Sarah Verhulst]]|
|[[Verkholyak, Oxana|AUTHOR Oxana Verkholyak]]|
|[[Veselý, Karel|AUTHOR Karel Veselý]]|
|[[Vestman, Ville|AUTHOR Ville Vestman]]|
|[[Vikram, C.M.|AUTHOR C.M. Vikram]]|
|[[Villalba, Jesús|AUTHOR Jesús Villalba]]|
|[[Vincent, Emmanuel|AUTHOR Emmanuel Vincent]]|
|[[Vincent, Jonathan|AUTHOR Jonathan Vincent]]|
|[[Vinciarelli, Alessandro|AUTHOR Alessandro Vinciarelli]]|
|[[Vinogradova, Alisa|AUTHOR Alisa Vinogradova]]|
|[[Vipperla, Ravichander|AUTHOR Ravichander Vipperla]]|
|[[Virkar, Yogesh|AUTHOR Yogesh Virkar]]|
|[[Virpioja, Sami|AUTHOR Sami Virpioja]]|
|[[Vishnubhotla, Srikanth|AUTHOR Srikanth Vishnubhotla]]|
|[[Visontai, Mirkó|AUTHOR Mirkó Visontai]]|
|[[Vitaladevuni, Shiv Naga Prasad|AUTHOR Shiv Naga Prasad Vitaladevuni]]|
|[[Volkova, Marina|AUTHOR Marina Volkova]]|
|[[Volokhov, Vladimir|AUTHOR Vladimir Volokhov]]|
|[[von Neumann, Thilo|AUTHOR Thilo von Neumann]]|
|[[von Platen, Patrick|AUTHOR Patrick von Platen]]|
|[[Vu, Ngoc Thang|AUTHOR Ngoc Thang Vu]]|
|[[Vuissoz, Pierre-André|AUTHOR Pierre-André Vuissoz]]|
|[[Vuong, Tyler|AUTHOR Tyler Vuong]]|
|[[Vuppala, Anil Kumar|AUTHOR Anil Kumar Vuppala]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Wagner, Petra|AUTHOR Petra Wagner]]|
|[[Waibel, Alex|AUTHOR Alex Waibel]]|
|[[Wakamiya, Kohei|AUTHOR Kohei Wakamiya]]|
|[[Walker, Traci|AUTHOR Traci Walker]]|
|[[Wan, Genshun|AUTHOR Genshun Wan]]|
|[[Wan, Guanglu|AUTHOR Guanglu Wan]]|
|[[Wan, Yangyang|AUTHOR Yangyang Wan]]|
|[[Wand, Michael|AUTHOR Michael Wand]]|
|[[Wandzio, Jan|AUTHOR Jan Wandzio]]|
|[[Wang, Bin|AUTHOR Bin Wang]]|
|[[Wang, Bing|AUTHOR Bing Wang]]|
|[[Wang, Bo|AUTHOR Bo Wang]]|
|[[Wang, Changhan|AUTHOR Changhan Wang]]|
|[[Wang, Chao|AUTHOR Chao Wang]]|
|[[Wang, Chengyi|AUTHOR Chengyi Wang]]|
|[[Wang, Dan|AUTHOR Dan Wang]]|
|[[Wang, DeLiang|AUTHOR DeLiang Wang]]|
|[[Wang, Dong|AUTHOR Dong Wang]]|
|[[Wang, Dongmei|AUTHOR Dongmei Wang]]|
|[[Wang, Gary|AUTHOR Gary Wang]]|
|[[Wang, Guangsen|AUTHOR Guangsen Wang]]|
|[[Wang, Haishuai|AUTHOR Haishuai Wang]]|
|[[Wang, Han|AUTHOR Han Wang]]|
|[[Wang, Hao|AUTHOR Hao Wang]]|
|[[Wang, Hao|AUTHOR Hao Wang]]|
|[[Wang, Haoyu|AUTHOR Haoyu Wang]]|
|[[Wang, Helin|AUTHOR Helin Wang]]|
|[[Wang, Hongji|AUTHOR Hongji Wang]]|
|[[Wang, Hsin-Min|AUTHOR Hsin-Min Wang]]|
|[[Wang, Huacan|AUTHOR Huacan Wang]]|
|[[Wang, Hui|AUTHOR Hui Wang]]|
|[[Wang, Huixin|AUTHOR Huixin Wang]]|
|[[Wang, Jiachun|AUTHOR Jiachun Wang]]|
|[[Wang, Jianzong|AUTHOR Jianzong Wang]]|
|[[Wang, Jiarui|AUTHOR Jiarui Wang]]|
|[[Wang, Jiayi|AUTHOR Jiayi Wang]]|
|[[Wang, Jingsong|AUTHOR Jingsong Wang]]|
|[[Wang, Jisung|AUTHOR Jisung Wang]]|
|[[Wang, Jixuan|AUTHOR Jixuan Wang]]|
|[[Wang, Joe|AUTHOR Joe Wang]]|
|[[Wang, Jun|AUTHOR Jun Wang]]|
|[[Wang, Jun|AUTHOR Jun Wang]]|
|[[Wang, Jun|AUTHOR Jun Wang]]|
|[[Wang, Kuang-Ching|AUTHOR Kuang-Ching Wang]]|
|[[Wang, Lan|AUTHOR Lan Wang]]|
|[[Wang, Lei|AUTHOR Lei Wang]]|
|[[Wang, Lei|AUTHOR Lei Wang]]|
|[[Wang, Li|AUTHOR Li Wang]]|
|[[Wang, Liming|AUTHOR Liming Wang]]|
|[[Wang, Linlin|AUTHOR Linlin Wang]]|
|[[Wang, Liyuan|AUTHOR Liyuan Wang]]|
|[[Wang, Longbiao|AUTHOR Longbiao Wang]]|
|[[Wang, Longshaokan|AUTHOR Longshaokan Wang]]|
|[[Wang, Luyu|AUTHOR Luyu Wang]]|
|[[Wang, Mingjiang|AUTHOR Mingjiang Wang]]|
|[[Wang, Qing|AUTHOR Qing Wang]]|
|[[Wang, Qingran|AUTHOR Qingran Wang]]|
|[[Wang, Qiongqiong|AUTHOR Qiongqiong Wang]]|
|[[Wang, Quan|AUTHOR Quan Wang]]|
|[[Wang, Ruili|AUTHOR Ruili Wang]]|
|[[Wang, Shaojun|AUTHOR Shaojun Wang]]|
|[[Wang, Shibo|AUTHOR Shibo Wang]]|
|[[Wang, Shuai|AUTHOR Shuai Wang]]|
|[[Wang, Syu-Siang|AUTHOR Syu-Siang Wang]]|
|[[Wang, Tao|AUTHOR Tao Wang]]|
|[[Wang, Tianqi|AUTHOR Tianqi Wang]]|
|[[Wang, Tong|AUTHOR Tong Wang]]|
|[[Wang, Weimin|AUTHOR Weimin Wang]]|
|[[Wang, Weiran|AUTHOR Weiran Wang]]|
|[[Wang, Weiyue|AUTHOR Weiyue Wang]]|
|[[Wang, Wenchao|AUTHOR Wenchao Wang]]|
|[[Wang, Wentao|AUTHOR Wentao Wang]]|
|[[Wang, Wenwu|AUTHOR Wenwu Wang]]|
|[[Wang, William Shi-Yuan|AUTHOR William Shi-Yuan Wang]]|
|[[Wang, Xi|AUTHOR Xi Wang]]|
|[[Wang, Xiaofei|AUTHOR Xiaofei Wang]]|
|[[Wang, Xiao-Rui|AUTHOR Xiao-Rui Wang]]|
|[[Wang, Xinhao|AUTHOR Xinhao Wang]]|
|[[Wang, Xin|AUTHOR Xin Wang]]|
|[[Wang, Xinsheng|AUTHOR Xinsheng Wang]]|
|[[Wang, Xin|AUTHOR Xin Wang]]|
|[[Wang, Yan|AUTHOR Yan Wang]]|
|[[Wang, Yang|AUTHOR Yang Wang]]|
|[[Wang, Yanhong|AUTHOR Yanhong Wang]]|
|[[Wang, Yannan|AUTHOR Yannan Wang]]|
|[[Wang, Yiming|AUTHOR Yiming Wang]]|
|[[Wang, Yongqiang|AUTHOR Yongqiang Wang]]|
|[[Wang, Yujun|AUTHOR Yujun Wang]]|
|[[Wang, Yu|AUTHOR Yu Wang]]|
|[[Wang, Yu|AUTHOR Yu Wang]]|
|[[Wang, Yu-Xuan|AUTHOR Yu-Xuan Wang]]|
|[[Wang, Zhangyang|AUTHOR Zhangyang Wang]]|
|[[Wang, Zhao|AUTHOR Zhao Wang]]|
|[[Wang, Zhenghao|AUTHOR Zhenghao Wang]]|
|[[Wang, Zhenyu|AUTHOR Zhenyu Wang]]|
|[[Wang, Zhiyong|AUTHOR Zhiyong Wang]]|
|[[Wang, Ziteng|AUTHOR Ziteng Wang]]|
|[[Warialani, Mayur|AUTHOR Mayur Warialani]]|
|[[Watanabe, Shinji|AUTHOR Shinji Watanabe]]|
|[[Watts, Oliver|AUTHOR Oliver Watts]]|
|[[Webb, Jon|AUTHOR Jon Webb]]|
|[[Webber, Jacob J.|AUTHOR Jacob J. Webber]]|
|[[Weber, Cornelius|AUTHOR Cornelius Weber]]|
|[[Weerasekera, Rivindu|AUTHOR Rivindu Weerasekera]]|
|[[Wei, Jianguo|AUTHOR Jianguo Wei]]|
|[[Wei, Wei|AUTHOR Wei Wei]]|
|[[Wei, Wenning|AUTHOR Wenning Wei]]|
|[[Wei, Wenqi|AUTHOR Wenqi Wei]]|
|[[Wei, Xiaodong|AUTHOR Xiaodong Wei]]|
|[[Wei, Yuheng|AUTHOR Yuheng Wei]]|
|[[Weiner, Jochen|AUTHOR Jochen Weiner]]|
|[[Weisman, Ran|AUTHOR Ran Weisman]]|
|[[Wellington, Scott|AUTHOR Scott Wellington]]|
|[[Wen, Shixue|AUTHOR Shixue Wen]]|
|[[Wen, Xue|AUTHOR Xue Wen]]|
|[[Wen, Zhengqi|AUTHOR Zhengqi Wen]]|
|[[Weng, Chao|AUTHOR Chao Weng]]|
|[[Weng, Shi-Yan|AUTHOR Shi-Yan Weng]]|
|[[Weninger, Felix|AUTHOR Felix Weninger]]|
|[[Wermter, Stefan|AUTHOR Stefan Wermter]]|
|[[Westhausen, Nils L.|AUTHOR Nils L. Westhausen]]|
|[[Weston, Jack|AUTHOR Jack Weston]]|
|[[Whang, Taesun|AUTHOR Taesun Whang]]|
|[[Whatmough, Paul N.|AUTHOR Paul N. Whatmough]]|
|[[Whited, Chad W.|AUTHOR Chad W. Whited]]|
|[[Whitehill, Jacob|AUTHOR Jacob Whitehill]]|
|[[Whitehill, Matt|AUTHOR Matt Whitehill]]|
|[[Wichern, Gordon|AUTHOR Gordon Wichern]]|
|[[Wiesler, Simon|AUTHOR Simon Wiesler]]|
|[[Wilkins, Nicholas|AUTHOR Nicholas Wilkins]]|
|[[Willett, Daniel|AUTHOR Daniel Willett]]|
|[[Williams, Jennifer|AUTHOR Jennifer Williams]]|
|[[Williamson, Donald S.|AUTHOR Donald S. Williamson]]|
|[[Williamson, James R.|AUTHOR James R. Williamson]]|
|[[Wills, Simone|AUTHOR Simone Wills]]|
|[[Wilson, Kevin|AUTHOR Kevin Wilson]]|
|[[Winata, Genta Indra|AUTHOR Genta Indra Winata]]|
|[[Wisler, Alan|AUTHOR Alan Wisler]]|
|[[Wohlwend, Jeremy|AUTHOR Jeremy Wohlwend]]|
|[[Woisard, Virginie|AUTHOR Virginie Woisard]]|
|[[Wolf, Lior|AUTHOR Lior Wolf]]|
|[[Wołk, Krzysztof|AUTHOR Krzysztof Wołk]]|
|[[Wong, Jeremy H.M.|AUTHOR Jeremy H.M. Wong]]|
|[[Woodland, Philip C.|AUTHOR Philip C. Woodland]]|
|[[Woodward, Alejandro|AUTHOR Alejandro Woodward]]|
|[[Woszczyk, Dominika|AUTHOR Dominika Woszczyk]]|
|[[Wu, Anne|AUTHOR Anne Wu]]|
|[[Wu, Bo|AUTHOR Bo Wu]]|
|[[Wu, Chunyang|AUTHOR Chunyang Wu]]|
|[[Wu, Da-Yi|AUTHOR Da-Yi Wu]]|
|[[Wu, Ed X.|AUTHOR Ed X. Wu]]|
|[[Wu, Haibin|AUTHOR Haibin Wu]]|
|[[Wu, Haiwei|AUTHOR Haiwei Wu]]|
|[[Wu, Huaxin|AUTHOR Huaxin Wu]]|
|[[Wu, Jian|AUTHOR Jian Wu]]|
|[[Wu, Jian|AUTHOR Jian Wu]]|
|[[Wu, Jibin|AUTHOR Jibin Wu]]|
|[[Wu, Jie|AUTHOR Jie Wu]]|
|[[Wu, Meng-Che|AUTHOR Meng-Che Wu]]|
|[[Wu, Mengyue|AUTHOR Mengyue Wu]]|
|[[Wu, Qinghua|AUTHOR Qinghua Wu]]|
|[[Wu, Shuang|AUTHOR Shuang Wu]]|
|[[Wu, Xian|AUTHOR Xian Wu]]|
|[[Wu, Xihong|AUTHOR Xihong Wu]]|
|[[Wu, Xiping|AUTHOR Xiping Wu]]|
|[[Wu, Xixin|AUTHOR Xixin Wu]]|
|[[Wu, Yanfeng|AUTHOR Yanfeng Wu]]|
|[[Wu, Yangcheng|AUTHOR Yangcheng Wu]]|
|[[Wu, Yaru|AUTHOR Yaru Wu]]|
|[[Wu, Yibo|AUTHOR Yibo Wu]]|
|[[Wu, Yi-Chiao|AUTHOR Yi-Chiao Wu]]|
|[[Wu, Yonghui|AUTHOR Yonghui Wu]]|
|[[Wu, Yu|AUTHOR Yu Wu]]|
|[[Wu, Yue|AUTHOR Yue Wu]]|
|[[Wu, Yusong|AUTHOR Yusong Wu]]|
|[[Wu, Zhenzong|AUTHOR Zhenzong Wu]]|
|[[Wu, Zhiyong|AUTHOR Zhiyong Wu]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Xezonaki, Danai|AUTHOR Danai Xezonaki]]|
|[[Xia, Wei|AUTHOR Wei Xia]]|
|[[Xia, Yangyang|AUTHOR Yangyang Xia]]|
|[[Xiang, Fei|AUTHOR Fei Xiang]]|
|[[Xiang, Hongyu|AUTHOR Hongyu Xiang]]|
|[[Xiang, Teng|AUTHOR Teng Xiang]]|
|[[Xiang, Yang|AUTHOR Yang Xiang]]|
|[[Xiao, Alex|AUTHOR Alex Xiao]]|
|[[Xiao, Jinba|AUTHOR Jinba Xiao]]|
|[[Xiao, Jing|AUTHOR Jing Xiao]]|
|[[Xiao, Longshuai|AUTHOR Longshuai Xiao]]|
|[[Xiao, Xiong|AUTHOR Xiong Xiao]]|
|[[Xie, Lei|AUTHOR Lei Xie]]|
|[[Xie, Longhan|AUTHOR Longhan Xie]]|
|[[Xie, Xiang|AUTHOR Xiang Xie]]|
|[[Xie, Xurong|AUTHOR Xurong Xie]]|
|[[Xie, Yanlu|AUTHOR Yanlu Xie]]|
|[[Xie, Yu|AUTHOR Yu Xie]]|
|[[Xin, Detai|AUTHOR Detai Xin]]|
|[[Xing, Mengtao|AUTHOR Mengtao Xing]]|
|[[Xing, Xiaofen|AUTHOR Xiaofen Xing]]|
|[[Xiong, Caiming|AUTHOR Caiming Xiong]]|
|[[Xiong, Shengwu|AUTHOR Shengwu Xiong]]|
|[[Xu, Anqi|AUTHOR Anqi Xu]]|
|[[Xu, Bo|AUTHOR Bo Xu]]|
|[[Xu, Boyan|AUTHOR Boyan Xu]]|
|[[Xu, Can|AUTHOR Can Xu]]|
|[[Xu, Chang|AUTHOR Chang Xu]]|
|[[Xu, Chenglin|AUTHOR Chenglin Xu]]|
|[[Xu, Chengwei|AUTHOR Chengwei Xu]]|
|[[Xu, Dongxiang|AUTHOR Dongxiang Xu]]|
|[[Xu, Guanghui|AUTHOR Guanghui Xu]]|
|[[Xu, Haihua|AUTHOR Haihua Xu]]|
|[[Xu, Hainan|AUTHOR Hainan Xu]]|
|[[Xu, Jiahao|AUTHOR Jiahao Xu]]|
|[[Xu, Jiaming|AUTHOR Jiaming Xu]]|
|[[Xu, Jin|AUTHOR Jin Xu]]|
|[[Xu, Jing|AUTHOR Jing Xu]]|
|[[Xu, Junhai|AUTHOR Junhai Xu]]|
|[[Xu, Kele|AUTHOR Kele Xu]]|
|[[Xu, Kun|AUTHOR Kun Xu]]|
|[[Xu, Longting|AUTHOR Longting Xu]]|
|[[Xu, Menglong|AUTHOR Menglong Xu]]|
|[[Xu, Peng|AUTHOR Peng Xu]]|
|[[Xu, Qiantong|AUTHOR Qiantong Xu]]|
|[[Xu, Shan|AUTHOR Shan Xu]]|
|[[Xu, Tianjiao|AUTHOR Tianjiao Xu]]|
|[[Xu, Xiangmin|AUTHOR Xiangmin Xu]]|
|[[Xu, Xinkang|AUTHOR Xinkang Xu]]|
|[[Xu, Xinnuo|AUTHOR Xinnuo Xu]]|
|[[Xu, Xinzi|AUTHOR Xinzi Xu]]|
|[[Xu, Yan|AUTHOR Yan Xu]]|
|[[Xu, Yi|AUTHOR Yi Xu]]|
|[[Xu, Yi|AUTHOR Yi Xu]]|
|[[Xu, Yong|AUTHOR Yong Xu]]|
|[[Xu, Zhen|AUTHOR Zhen Xu]]|
|[[Xue, W.|AUTHOR W. Xue]]|
|[[Xue, Wei|AUTHOR Wei Xue]]|
|[[Xue, Yawen|AUTHOR Yawen Xue]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Yadav, Hemant|AUTHOR Hemant Yadav]]|
|[[Yadav, Ravi|AUTHOR Ravi Yadav]]|
|[[Yadav, Ravindra|AUTHOR Ravindra Yadav]]|
|[[Yamagishi, Junichi|AUTHOR Junichi Yamagishi]]|
|[[Yamamoto, Hitoshi|AUTHOR Hitoshi Yamamoto]]|
|[[Yamamoto, Kenta|AUTHOR Kenta Yamamoto]]|
|[[Yamamoto, Ryuichi|AUTHOR Ryuichi Yamamoto]]|
|[[Yamamoto, Yoshiharu|AUTHOR Yoshiharu Yamamoto]]|
|[[Yamashita, Yuki|AUTHOR Yuki Yamashita]]|
|[[Yan, Bi-Cheng|AUTHOR Bi-Cheng Yan]]|
|[[Yan, Jie|AUTHOR Jie Yan]]|
|[[Yan, Nan|AUTHOR Nan Yan]]|
|[[Yan, Zhijie|AUTHOR Zhijie Yan]]|
|[[Yanagisawa, Kayoko|AUTHOR Kayoko Yanagisawa]]|
|[[Yanagita, Tomoya|AUTHOR Tomoya Yanagita]]|
|[[Yang, Bing|AUTHOR Bing Yang]]|
|[[Yang, Bo|AUTHOR Bo Yang]]|
|[[Yang, Chao-Han Huck|AUTHOR Chao-Han Huck Yang]]|
|[[Yang, Chen|AUTHOR Chen Yang]]|
|[[Yang, Fengyu|AUTHOR Fengyu Yang]]|
|[[Yang, Geng|AUTHOR Geng Yang]]|
|[[Yang, Hejung|AUTHOR Hejung Yang]]|
|[[Yang, Jichen|AUTHOR Jichen Yang]]|
|[[Yang, Jingzhou|AUTHOR Jingzhou Yang]]|
|[[Yang, Jinhyeok|AUTHOR Jinhyeok Yang]]|
|[[Yang, Joon-Young|AUTHOR Joon-Young Yang]]|
|[[Yang, Kisu|AUTHOR Kisu Yang]]|
|[[Yang, Lei|AUTHOR Lei Yang]]|
|[[Yang, Li-Chia|AUTHOR Li-Chia Yang]]|
|[[Yang, Longfei|AUTHOR Longfei Yang]]|
|[[Yang, Migyeong|AUTHOR Migyeong Yang]]|
|[[Yang, Peng|AUTHOR Peng Yang]]|
|[[Yang, Shan|AUTHOR Shan Yang]]|
|[[Yang, Shu-wen|AUTHOR Shu-wen Yang]]|
|[[Yang, Sohee|AUTHOR Sohee Yang]]|
|[[Yang, Xiaoyan|AUTHOR Xiaoyan Yang]]|
|[[Yang, Yi-Hsuan|AUTHOR Yi-Hsuan Yang]]|
|[[Yang, Yike|AUTHOR Yike Yang]]|
|[[Yang, Yingen|AUTHOR Yingen Yang]]|
|[[Yang, Yuhong|AUTHOR Yuhong Yang]]|
|[[Yang, Zhanlei|AUTHOR Zhanlei Yang]]|
|[[Yang, Zhaojun|AUTHOR Zhaojun Yang]]|
|[[Yang, Zhongping|AUTHOR Zhongping Yang]]|
|[[Yang, Zijiang|AUTHOR Zijiang Yang]]|
|[[Yang, Ziqing|AUTHOR Ziqing Yang]]|
|[[Yasuda, Masahiro|AUTHOR Masahiro Yasuda]]|
|[[Yasuda, Yusuke|AUTHOR Yusuke Yasuda]]|
|[[Yasuhara, Kazuki|AUTHOR Kazuki Yasuhara]]|
|[[Ye, Guoli|AUTHOR Guoli Ye]]|
|[[Ye, Jihua|AUTHOR Jihua Ye]]|
|[[Ye, Zheng|AUTHOR Zheng Ye]]|
|[[Ye, Zhongfu|AUTHOR Zhongfu Ye]]|
|[[Yegnanarayana, B.|AUTHOR B. Yegnanarayana]]|
|[[Yeh, Ching-Feng|AUTHOR Ching-Feng Yeh]]|
|[[Yeh, Sung-Lin|AUTHOR Sung-Lin Yeh]]|
|[[Yeh, Yin-Cheng|AUTHOR Yin-Cheng Yeh]]|
|[[Yeo, Eun Jung|AUTHOR Eun Jung Yeo]]|
|[[Yeo, Jinsu|AUTHOR Jinsu Yeo]]|
|[[Yesilbursa, Mansur|AUTHOR Mansur Yesilbursa]]|
|[[Yeung, Gary|AUTHOR Gary Yeung]]|
|[[Yeung, Yu Ting|AUTHOR Yu Ting Yeung]]|
|[[Yi, Dong Hoon|AUTHOR Dong Hoon Yi]]|
|[[Yi, Jiangyan|AUTHOR Jiangyan Yi]]|
|[[Yi, Lu|AUTHOR Lu Yi]]|
|[[Yılmaz, Emre|AUTHOR Emre Yılmaz]]|
|[[Yin, Shouyi|AUTHOR Shouyi Yin]]|
|[[Yoon, Hyun-Wook|AUTHOR Hyun-Wook Yoon]]|
|[[Yoon, Jaesam|AUTHOR Jaesam Yoon]]|
|[[Yoon, Jeewoo|AUTHOR Jeewoo Yoon]]|
|[[Yoon, Ji Won|AUTHOR Ji Won Yoon]]|
|[[Yoon, Seunghyun|AUTHOR Seunghyun Yoon]]|
|[[Yoshigi, Hanako|AUTHOR Hanako Yoshigi]]|
|[[Yoshii, Kazuyoshi|AUTHOR Kazuyoshi Yoshii]]|
|[[Yoshioka, Takuya|AUTHOR Takuya Yoshioka]]|
|[[You, Kisun|AUTHOR Kisun You]]|
|[[Youssef, Hmamouche|AUTHOR Hmamouche Youssef]]|
|[[Yu, Chengzhu|AUTHOR Chengzhu Yu]]|
|[[Yu, Dong|AUTHOR Dong Yu]]|
|[[Yu, Guoqiao|AUTHOR Guoqiao Yu]]|
|[[Yu, Ha-Jin|AUTHOR Ha-Jin Yu]]|
|[[Yu, Hongjiang|AUTHOR Hongjiang Yu]]|
|[[Yu, Jiahui|AUTHOR Jiahui Yu]]|
|[[Yu, Jianwei|AUTHOR Jianwei Yu]]|
|[[Yu, Kai|AUTHOR Kai Yu]]|
|[[Yu, Meng|AUTHOR Meng Yu]]|
|[[Yu, Yakun|AUTHOR Yakun Yu]]|
|[[Yu, Ya-Qi|AUTHOR Ya-Qi Yu]]|
|[[Yu, Yi|AUTHOR Yi Yu]]|
|[[Yuan, Daode|AUTHOR Daode Yuan]]|
|[[Yuan, Jiahong|AUTHOR Jiahong Yuan]]|
|[[Yuditskaya, Sophia|AUTHOR Sophia Yuditskaya]]|
|[[Yue, Yang|AUTHOR Yang Yue]]|
|[[Yue, Zhengjun|AUTHOR Zhengjun Yue]]|
|[[Yusuf, Bolaji|AUTHOR Bolaji Yusuf]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Zafonte, Ross|AUTHOR Ross Zafonte]]|
|[[Zaid, Ahmed|AUTHOR Ahmed Zaid]]|
|[[Zaiem, Mohamed Salah|AUTHOR Mohamed Salah Zaiem]]|
|[[Zainkó, Csaba|AUTHOR Csaba Zainkó]]|
|[[Zechner, Klaus|AUTHOR Klaus Zechner]]|
|[[Zeinali, Hossein|AUTHOR Hossein Zeinali]]|
|[[Żelasko, Piotr|AUTHOR Piotr Żelasko]]|
|[[Zellers, Margaret|AUTHOR Margaret Zellers]]|
|[[Zellou, Georgia|AUTHOR Georgia Zellou]]|
|[[Zeng, Belinda|AUTHOR Belinda Zeng]]|
|[[Zeng, Michael|AUTHOR Michael Zeng]]|
|[[Zeng, Zhen|AUTHOR Zhen Zeng]]|
|[[Zeng, Zhen|AUTHOR Zhen Zeng]]|
|[[Zeyer, Albert|AUTHOR Albert Zeyer]]|
|[[Zha, Weiwei|AUTHOR Weiwei Zha]]|
|[[Zhan, Puming|AUTHOR Puming Zhan]]|
|[[Zhan, Qingran|AUTHOR Qingran Zhan]]|
|[[Zhang, Bihong|AUTHOR Bihong Zhang]]|
|[[Zhang, Bin|AUTHOR Bin Zhang]]|
|[[Zhang, Bo|AUTHOR Bo Zhang]]|
|[[Zhang, Chao|AUTHOR Chao Zhang]]|
|[[Zhang, Chao|AUTHOR Chao Zhang]]|
|[[Zhang, Chao|AUTHOR Chao Zhang]]|
|[[Zhang, Chen|AUTHOR Chen Zhang]]|
|[[Zhang, Chenggang|AUTHOR Chenggang Zhang]]|
|[[Zhang, Chunlei|AUTHOR Chunlei Zhang]]|
|[[Zhang, Chuxiong|AUTHOR Chuxiong Zhang]]|
|[[Zhang, Frank|AUTHOR Frank Zhang]]|
|[[Zhang, Gaoyan|AUTHOR Gaoyan Zhang]]|
|[[Zhang, Guangyan|AUTHOR Guangyan Zhang]]|
|[[Zhang, Haimo|AUTHOR Haimo Zhang]]|
|[[Zhang, Haiteng|AUTHOR Haiteng Zhang]]|
|[[Zhang, Haitong|AUTHOR Haitong Zhang]]|
|[[Zhang, Hanyi|AUTHOR Hanyi Zhang]]|
|[[Zhang, Hao|AUTHOR Hao Zhang]]|
|[[Zhang, Haobo|AUTHOR Haobo Zhang]]|
|[[Zhang, Hui|AUTHOR Hui Zhang]]|
|[[Zhang, Jiacen|AUTHOR Jiacen Zhang]]|
|[[Zhang, Jian-shu|AUTHOR Jian-shu Zhang]]|
|[[Zhang, Jiawen|AUTHOR Jiawen Zhang]]|
|[[Zhang, Jiaxuan|AUTHOR Jiaxuan Zhang]]|
|[[Zhang, Jiliang|AUTHOR Jiliang Zhang]]|
|[[Zhang, Jinghua|AUTHOR Jinghua Zhang]]|
|[[Zhang, Jing-Xuan|AUTHOR Jing-Xuan Zhang]]|
|[[Zhang, Jinsong|AUTHOR Jinsong Zhang]]|
|[[Zhang, Kejia|AUTHOR Kejia Zhang]]|
|[[Zhang, Kun|AUTHOR Kun Zhang]]|
|[[Zhang, Li|AUTHOR Li Zhang]]|
|[[Zhang, Lin|AUTHOR Lin Zhang]]|
|[[Zhang, Liqiang|AUTHOR Liqiang Zhang]]|
|[[Zhang, Liwen|AUTHOR Liwen Zhang]]|
|[[Zhang, Lu|AUTHOR Lu Zhang]]|
|[[Zhang, Mingxin|AUTHOR Mingxin Zhang]]|
|[[Zhang, Mingyang|AUTHOR Mingyang Zhang]]|
|[[Zhang, Peng|AUTHOR Peng Zhang]]|
|[[Zhang, Peng|AUTHOR Peng Zhang]]|
|[[Zhang, Pengyuan|AUTHOR Pengyuan Zhang]]|
|[[Zhang, Qi|AUTHOR Qi Zhang]]|
|[[Zhang, Qian|AUTHOR Qian Zhang]]|
|[[Zhang, Richard|AUTHOR Richard Zhang]]|
|[[Zhang, Ruiteng|AUTHOR Ruiteng Zhang]]|
|[[Zhang, Shengkai|AUTHOR Shengkai Zhang]]|
|[[Zhang, Shiliang|AUTHOR Shiliang Zhang]]|
|[[Zhang, Shimin|AUTHOR Shimin Zhang]]|
|[[Zhang, Shi-Xiong|AUTHOR Shi-Xiong Zhang]]|
|[[Zhang, Shuai|AUTHOR Shuai Zhang]]|
|[[Zhang, Songyang|AUTHOR Songyang Zhang]]|
|[[Zhang, Tao|AUTHOR Tao Zhang]]|
|[[Zhang, Wangyou|AUTHOR Wangyou Zhang]]|
|[[Zhang, Wei-Qiang|AUTHOR Wei-Qiang Zhang]]|
|[[Zhang, Xianwei|AUTHOR Xianwei Zhang]]|
|[[Zhang, Xiaohui|AUTHOR Xiaohui Zhang]]|
|[[Zhang, Xiao-Lei|AUTHOR Xiao-Lei Zhang]]|
|[[Zhang, Xinxin|AUTHOR Xinxin Zhang]]|
|[[Zhang, Xinyu|AUTHOR Xinyu Zhang]]|
|[[Zhang, Xueliang|AUTHOR Xueliang Zhang]]|
|[[Zhang, Xueshuai|AUTHOR Xueshuai Zhang]]|
|[[Zhang, Yan|AUTHOR Yan Zhang]]|
|[[Zhang, Yi|AUTHOR Yi Zhang]]|
|[[Zhang, Yichi|AUTHOR Yichi Zhang]]|
|[[Zhang, Yizhe|AUTHOR Yizhe Zhang]]|
|[[Zhang, Yuekai|AUTHOR Yuekai Zhang]]|
|[[Zhang, Yunchun|AUTHOR Yunchun Zhang]]|
|[[Zhang, Yu|AUTHOR Yu Zhang]]|
|[[Zhang, Yu|AUTHOR Yu Zhang]]|
|[[Zhang, Zewang|AUTHOR Zewang Zhang]]|
|[[Zhang, Zhengchen|AUTHOR Zhengchen Zhang]]|
|[[Zhang, Zhengdong|AUTHOR Zhengdong Zhang]]|
|[[Zhang, Zhenjie|AUTHOR Zhenjie Zhang]]|
|[[Zhang, Zhenrui|AUTHOR Zhenrui Zhang]]|
|[[Zhang, Zhihui|AUTHOR Zhihui Zhang]]|
|[[Zhang, Zhiyong|AUTHOR Zhiyong Zhang]]|
|[[Zhang, Zhuo|AUTHOR Zhuo Zhang]]|
|[[Zhang, Zhuohuang|AUTHOR Zhuohuang Zhang]]|
|[[Zhang, Zining|AUTHOR Zining Zhang]]|
|[[Zhang, Zi-qiang|AUTHOR Zi-qiang Zhang]]|
|[[Zhang, Zixing|AUTHOR Zixing Zhang]]|
|[[Zhao, Bin|AUTHOR Bin Zhao]]|
|[[Zhao, Ethan|AUTHOR Ethan Zhao]]|
|[[Zhao, Guanlong|AUTHOR Guanlong Zhao]]|
|[[Zhao, Jiangjiang|AUTHOR Jiangjiang Zhao]]|
|[[Zhao, Jianshu|AUTHOR Jianshu Zhao]]|
|[[Zhao, Miao|AUTHOR Miao Zhao]]|
|[[Zhao, Rui|AUTHOR Rui Zhao]]|
|[[Zhao, Sheng|AUTHOR Sheng Zhao]]|
|[[Zhao, Shengkui|AUTHOR Shengkui Zhao]]|
|[[Zhao, Tianyu|AUTHOR Tianyu Zhao]]|
|[[Zhao, Xudong|AUTHOR Xudong Zhao]]|
|[[Zhao, Yan|AUTHOR Yan Zhao]]|
|[[Zhao, Yi|AUTHOR Yi Zhao]]|
|[[Zhao, Yingzhu|AUTHOR Yingzhu Zhao]]|
|[[Zhao, Yuanyuan|AUTHOR Yuanyuan Zhao]]|
|[[Zhao, Zeyu|AUTHOR Zeyu Zhao]]|
|[[Zhao, Zijian|AUTHOR Zijian Zhao]]|
|[[Zhao, Ziping|AUTHOR Ziping Zhao]]|
|[[Zheng, Chengshi|AUTHOR Chengshi Zheng]]|
|[[Zheng, Huaiyuan|AUTHOR Huaiyuan Zheng]]|
|[[Zheng, J.|AUTHOR J. Zheng]]|
|[[Zheng, Naijun|AUTHOR Naijun Zheng]]|
|[[Zheng, Nengheng|AUTHOR Nengheng Zheng]]|
|[[Zheng, Siqi|AUTHOR Siqi Zheng]]|
|[[Zheng, Thomas Fang|AUTHOR Thomas Fang Zheng]]|
|[[Zheng, Weiyi|AUTHOR Weiyi Zheng]]|
|[[Zheng, Wei-Zhong|AUTHOR Wei-Zhong Zheng]]|
|[[Zheng, Xu|AUTHOR Xu Zheng]]|
|[[Zheng, Zhenpeng|AUTHOR Zhenpeng Zheng]]|
|[[Zhi, Yiming|AUTHOR Yiming Zhi]]|
|[[Zhong, Jinghua|AUTHOR Jinghua Zhong]]|
|[[Zhong, Rongxiu|AUTHOR Rongxiu Zhong]]|
|[[Zhong, Shun-Chang|AUTHOR Shun-Chang Zhong]]|
|[[Zhong, Xiaoli|AUTHOR Xiaoli Zhong]]|
|[[Zhong, Ying|AUTHOR Ying Zhong]]|
|[[Zhou, Bowen|AUTHOR Bowen Zhou]]|
|[[Zhou, Chao|AUTHOR Chao Zhou]]|
|[[Zhou, Dao|AUTHOR Dao Zhou]]|
|[[Zhou, Di|AUTHOR Di Zhou]]|
|[[Zhou, Hengshun|AUTHOR Hengshun Zhou]]|
|[[Zhou, Huali|AUTHOR Huali Zhou]]|
|[[Zhou, Huan|AUTHOR Huan Zhou]]|
|[[Zhou, Jianwei|AUTHOR Jianwei Zhou]]|
|[[Zhou, Joey Tianyi|AUTHOR Joey Tianyi Zhou]]|
|[[Zhou, Kun|AUTHOR Kun Zhou]]|
|[[Zhou, Li|AUTHOR Li Zhou]]|
|[[Zhou, Ming|AUTHOR Ming Zhou]]|
|[[Zhou, Qiru|AUTHOR Qiru Zhou]]|
|[[Zhou, Tianyan|AUTHOR Tianyan Zhou]]|
|[[Zhou, Wei|AUTHOR Wei Zhou]]|
|[[Zhou, Xinyong|AUTHOR Xinyong Zhou]]|
|[[Zhou, Xinyuan|AUTHOR Xinyuan Zhou]]|
|[[Zhou, Yingbo|AUTHOR Yingbo Zhou]]|
|[[Zhu, Feiqi|AUTHOR Feiqi Zhu]]|
|[[Zhu, Han|AUTHOR Han Zhu]]|
|[[Zhu, Hongcheng|AUTHOR Hongcheng Zhu]]|
|[[Zhu, Jiaqi|AUTHOR Jiaqi Zhu]]|
|[[Zhu, Jihua|AUTHOR Jihua Zhu]]|
|[[Zhu, Junzhe|AUTHOR Junzhe Zhu]]|
|[[Zhu, Pai|AUTHOR Pai Zhu]]|
|[[Zhu, Su|AUTHOR Su Zhu]]|
|[[Zhu, Wei-Ping|AUTHOR Wei-Ping Zhu]]|
|[[Zhu, Weizhong|AUTHOR Weizhong Zhu]]|
|[[Zhu, Xianjin|AUTHOR Xianjin Zhu]]|
|[[Zhu, Yun|AUTHOR Yun Zhu]]|
|[[Zhu, Zhi|AUTHOR Zhi Zhu]]|
|[[Zhuang, Bairong|AUTHOR Bairong Zhuang]]|
|[[Zisserman, Andrew|AUTHOR Andrew Zisserman]]|
|[[Zobel, P.|AUTHOR P. Zobel]]|
|[[Zou, Wei|AUTHOR Wei Zou]]|
|[[Zou, Yuexian|AUTHOR Yuexian Zou]]|
|[[Zulfikar, Wazeer|AUTHOR Wazeer Zulfikar]]|
|[[Zuluaga-Gomez, Juan|AUTHOR Juan Zuluaga-Gomez]]|
|[[Zweig, Geoffrey|AUTHOR Geoffrey Zweig]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1151.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-3|PAPER Mon-3-4-3 — ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1277.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-9|PAPER Mon-2-5-9 — Joint Prediction of Punctuation and Disfluency in Speech Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Prediction of Punctuation and Disfluency in Speech Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-1|PAPER Wed-3-1-1 — Automatic Scoring at Multi-Granularity for L2 Pronunciation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Scoring at Multi-Granularity for L2 Pronunciation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1284.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-2|PAPER Thu-2-9-2 — Joint Detection of Sentence Stress and Phrase Boundary for Prosody]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Detection of Sentence Stress and Phrase Boundary for Prosody</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-5|PAPER Wed-3-1-5 — Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2500.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-2|PAPER Thu-2-1-2 — Adversarial Dictionary Learning for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Dictionary Learning for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2859.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-7|PAPER Thu-1-5-7 — CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-2|PAPER Mon-2-4-2 — Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1614.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-2|PAPER Wed-1-1-2 — Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2450.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-7|PAPER Mon-1-3-7 — Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2450.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-7|PAPER Mon-1-3-7 — Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-2|PAPER Mon-2-4-2 — Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2372.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-2|PAPER Thu-2-7-2 — Adversarial Separation and Adaptation Network for Far-Field Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation and Adaptation Network for Far-Field Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1104.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-3|PAPER Wed-2-5-3 — Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1825.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-9|PAPER Thu-3-6-9 — Recognising Emotions in Dysarthric Speech Using Typical Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognising Emotions in Dysarthric Speech Using Typical Speech Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-4|PAPER Tue-1-5-4 — Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1473.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-6|PAPER Thu-1-10-6 — Nonlinear Residual Echo Suppression Using a Recurrent Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Using a Recurrent Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1891.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-3|PAPER Mon-2-8-3 — Contrastive Predictive Coding of Audio with an Adversary]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contrastive Predictive Coding of Audio with an Adversary</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2444.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-4|PAPER Tue-1-9-4 — The MSP-Conversation Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The MSP-Conversation Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-9|PAPER Mon-3-7-9 — Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-2|PAPER Thu-1-7-2 — On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-2|PAPER Tue-1-1-2 — Efficient Wait-k Models for Simultaneous Machine Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Wait-k Models for Simultaneous Machine Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3217.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-10|PAPER Wed-2-10-10 — Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1287.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-1|PAPER Wed-3-5-1 — Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2372.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-2|PAPER Thu-2-7-2 — Adversarial Separation and Adaptation Network for Far-Field Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation and Adaptation Network for Far-Field Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-4|PAPER Thu-2-7-4 — Strategies for End-to-End Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strategies for End-to-End Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-7|PAPER Tue-1-3-7 — Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2935.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-6|PAPER Thu-2-11-6 — Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-9|PAPER Wed-SS-1-6-9 — Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1437.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-5|PAPER Thu-1-1-5 — StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-6|PAPER Thu-2-9-6 — Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-10|PAPER Wed-1-3-10 — A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2731.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-3|PAPER Thu-3-7-3 — Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-4|PAPER Thu-3-7-4 — Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-6|PAPER Thu-3-8-6 — Language Model Data Augmentation Based on Text Domain Transfer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Model Data Augmentation Based on Text Domain Transfer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1459.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-8|PAPER Thu-3-6-8 — Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-1|PAPER Mon-3-9-1 — Optimization and Evaluation of an Intelligibility-Improving Signal Processing Approach (IISPA) for the Hurricane Challenge 2.0 with FADE]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization and Evaluation of an Intelligibility-Improving Signal Processing Approach (IISPA) for the Hurricane Challenge 2.0 with FADE</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2664.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-9|PAPER Wed-2-11-9 — Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2983.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-6|PAPER Tue-1-1-6 — Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualized Translation of Automatically Segmented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1563.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-2|PAPER Mon-3-3-2 — SEANet: A Multi-Modal Speech Enhancement Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SEANet: A Multi-Modal Speech Enhancement Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualized Translation of Automatically Segmented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-14|PAPER Wed-SS-1-6-14 — Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2353.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-6|PAPER Mon-2-4-6 — Microprosodic Variability in Plosives in German and Austrian German]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microprosodic Variability in Plosives in German and Austrian German</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Disfluency in Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-9|PAPER Thu-3-7-9 — Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-2|PAPER Mon-2-4-2 — Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-1|PAPER Wed-1-10-1 — The Effect of Language Proficiency on the Perception of Segmental Foreign Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Language Proficiency on the Perception of Segmental Foreign Accent</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-6|PAPER Wed-SS-1-4-6 — Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-4|PAPER Wed-1-10-4 — Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-5|PAPER Wed-SS-1-4-5 — Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-10|PAPER Mon-1-10-10 — FT SPEECH: Danish Parliament Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FT SPEECH: Danish Parliament Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2253.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-10|PAPER Thu-3-9-10 — Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1356.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-7|PAPER Mon-2-1-7 — Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-1|PAPER Mon-3-1-1 — Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2121.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-9|PAPER Mon-3-3-9 — Automatic Estimation of Intelligibility Measure for Consonants in Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Intelligibility Measure for Consonants in Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-1|PAPER Tue-1-1-1 — A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1949.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-8|PAPER Tue-1-3-8 — Deep F-Measure Maximization for End-to-End Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep F-Measure Maximization for End-to-End Speech Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-9|PAPER Wed-1-8-9 — Evaluating Automatically Generated Phoneme Captions for Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Automatically Generated Phoneme Captions for Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-6|PAPER Wed-3-2-6 — Identify Speakers in Cocktail Parties with End-to-End Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identify Speakers in Cocktail Parties with End-to-End Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1601.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-6|PAPER Thu-3-1-6 — Prediction of Sleepiness Ratings from Voice by Man and Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prediction of Sleepiness Ratings from Voice by Man and Machine</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-7|PAPER Wed-3-1-7 — Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1852.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-1|PAPER Thu-1-8-1 — Spoken Language ‘Grammatical Error Correction’]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language ‘Grammatical Error Correction’</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-4|PAPER Thu-1-8-4 — Universal Adversarial Attacks on Spoken Language Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Attacks on Spoken Language Assessment Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-5|PAPER Thu-1-8-5 — Ensemble Approaches for Uncertainty in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Approaches for Uncertainty in Spoken Language Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-9|PAPER Thu-1-11-9 — Attention Forcing for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Forcing for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-2|PAPER Thu-2-6-2 — Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-2|PAPER Mon-3-1-2 — Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2856.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-5|PAPER Mon-3-1-5 — Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-3|PAPER Mon-3-9-3 — Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-4|PAPER Mon-3-9-4 — Exploring Listeners’ Speech Rate Preferences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Listeners’ Speech Rate Preferences</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-1|PAPER Wed-1-10-1 — The Effect of Language Proficiency on the Perception of Segmental Foreign Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Language Proficiency on the Perception of Segmental Foreign Accent</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-1|PAPER Mon-2-9-1 — End-to-End Neural Transformer Based Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Transformer Based Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1614.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-2|PAPER Wed-1-1-2 — Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2445.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-9|PAPER Mon-3-11-9 — Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-8|PAPER Mon-1-2-8 — Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1168.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-1|PAPER Thu-3-11-1 — Sparseness-Aware DOA Estimation with Majorization Minimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sparseness-Aware DOA Estimation with Majorization Minimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-7|PAPER Wed-3-10-7 — Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2347.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-8|PAPER Thu-1-11-8 — End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1633.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-6|PAPER Mon-1-3-6 — Cortical Oscillatory Hierarchy for Natural Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cortical Oscillatory Hierarchy for Natural Sentence Processing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-3|PAPER Tue-SS-1-6-3 — X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1700.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-3|PAPER Wed-2-12-3 — Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-7|PAPER Mon-2-2-7 — CTC-Synchronous Training for Monotonic Attention Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CTC-Synchronous Training for Monotonic Attention Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1780.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-7|PAPER Wed-1-5-7 — Enhancing Monotonic Multihead Attention for Streaming ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonic Multihead Attention for Streaming ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1195.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-6|PAPER Wed-2-6-6 — Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">New Advances in Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-8|PAPER Wed-3-12-8 — Speech-Image Semantic Alignment Does Not Depend on Any Prior Classification Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Image Semantic Alignment Does Not Depend on Any Prior Classification Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-1|PAPER Wed-SS-1-6-1 — Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-5|PAPER Wed-2-9-5 — StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-9|PAPER Wed-3-7-9 — Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2236.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-5|PAPER Mon-1-4-5 — Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-10|PAPER Thu-2-11-10 — Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-2|PAPER Mon-1-12-2 — Kaldi-Web: An Installation-Free, On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kaldi-Web: An Installation-Free, On-Device Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2892.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-7|PAPER Thu-2-7-7 — A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-5|PAPER Mon-1-11-5 — Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1977.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-1|PAPER Thu-3-3-1 — Distributed Summation Privacy for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distributed Summation Privacy for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2985.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-12|PAPER Thu-2-9-12 — Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualized Translation of Automatically Segmented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-9|PAPER Thu-3-9-9 — Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2721.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-7|PAPER Wed-SS-1-6-7 — Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualized Translation of Automatically Segmented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-6|PAPER Thu-1-5-6 — Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-9|PAPER Wed-3-2-9 — Detecting and Counting Overlapping Speakers in Distant Speech Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Counting Overlapping Speakers in Distant Speech Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualized Translation of Automatically Segmented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-9|PAPER Mon-2-4-9 — Modeling Global Body Configurations in American Sign Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Global Body Configurations in American Sign Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Prosody Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|^<div class="cpauthorindexpersoncardpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-6|PAPER Thu-2-3-6 — Cues for Perception of Gender in Synthetic Voices and the Role of Identity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cues for Perception of Gender in Synthetic Voices and the Role of Identity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-2|PAPER Thu-2-2-2 — Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-8|PAPER Thu-2-2-8 — Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-3|PAPER Wed-3-5-3 — A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-5|PAPER Wed-SS-2-3-5 — FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2859.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-7|PAPER Thu-1-5-7 — CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1339.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-2|PAPER Tue-1-9-2 — Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Look Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1616.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-3|PAPER Wed-3-1-3 — An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-1|PAPER Wed-2-2-1 — Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2595.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-3|PAPER Wed-1-10-3 — The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-1|PAPER Thu-1-4-1 — Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2531.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-1|PAPER Thu-3-1-1 — Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1704.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-9|PAPER Thu-2-3-9 — Tone Variations in Regionally Accented Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Variations in Regionally Accented Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-7|PAPER Mon-1-4-7 — Unsupervised Methods for Evaluating Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Evaluating Speech Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2883.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-10|PAPER Mon-3-3-10 — Large Scale Evaluation of Importance Maps in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Evaluation of Importance Maps in Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2637.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-10|PAPER Tue-1-4-10 — Identifying Important Time-Frequency Locations in Continuous Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Important Time-Frequency Locations in Continuous Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-3|PAPER Thu-1-5-3 — Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2117.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-3|PAPER Wed-3-12-3 — Fusion Architectures for Word-Based Audiovisual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Architectures for Word-Based Audiovisual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-7|PAPER Thu-1-9-7 — Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2253.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-10|PAPER Thu-3-9-10 — Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-10|PAPER Mon-3-4-10 — Deep Learning Based Open Set Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Open Set Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3088.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-10|PAPER Thu-3-2-10 — Spectrum Correction: Acoustic Scene Classification with Mismatched Recording Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectrum Correction: Acoustic Scene Classification with Mismatched Recording Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-5|PAPER Wed-2-9-5 — StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-9|PAPER Wed-3-7-9 — Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1335.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-9|PAPER Tue-1-4-9 — Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1336.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-3|PAPER Tue-1-7-3 — Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1339.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-2|PAPER Tue-1-9-2 — Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1598.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-10|PAPER Thu-3-6-10 — Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Based Wakeword-Independent Verification System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Robust Word-Level Wakeword Verification Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-5|PAPER Mon-1-11-5 — Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1784.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-2|PAPER Thu-1-3-2 — Finnish ASR with Deep Transformer Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finnish ASR with Deep Transformer Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-4|PAPER Wed-3-7-4 — Deep Speech Inpainting of Time-Frequency Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speech Inpainting of Time-Frequency Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-4|PAPER Wed-3-7-4 — Deep Speech Inpainting of Time-Frequency Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speech Inpainting of Time-Frequency Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2183.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-5|PAPER Mon-1-9-5 — A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2471.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-2|PAPER Mon-1-1-2 — SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-3|PAPER Wed-3-7-3 — Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1908.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-5|PAPER Mon-1-7-5 — Self-Attentive Similarity Measurement Strategies in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attentive Similarity Measurement Strategies in Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1436.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-2|PAPER Mon-3-11-2 — Atss-Net: Target Speaker Separation via Attention-Based Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Atss-Net: Target Speaker Separation via Attention-Based Neural Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-4|PAPER Wed-2-2-4 — Domain Aware Training for Far-Field Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Aware Training for Far-Field Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1915.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-3|PAPER Wed-SS-2-3-3 — The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-1|PAPER Thu-1-11-1 — From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-7|PAPER Mon-2-8-7 — A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-8|PAPER Mon-2-8-8 — Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-5|PAPER Mon-3-4-5 — Acoustic Scene Analysis with Multi-Head Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Analysis with Multi-Head Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1104.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-3|PAPER Wed-2-5-3 — Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-8|PAPER Thu-3-7-8 — Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-3|PAPER Mon-2-4-3 — Processes and Consequences of Co-Articulation  in Mandarin V,,1,,N.(C,,2,,)V,,2,, Context: Phonology and Phonetics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Processes and Consequences of Co-Articulation  in Mandarin V,,1,,N.(C,,2,,)V,,2,, Context: Phonology and Phonetics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-6|PAPER Wed-3-10-6 — Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1395.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-2|PAPER Wed-3-5-2 — How Does Label Noise Affect the Quality of Speaker Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Does Label Noise Affect the Quality of Speaker Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-5|PAPER Wed-2-10-5 — Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2989.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-7|PAPER Thu-2-11-7 — Sparse Mixture of Local Experts for Efficient Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sparse Mixture of Local Experts for Efficient Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1446.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-6|PAPER Mon-2-10-6 — Self-Attention Encoding and Pooling for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Encoding and Pooling for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1601.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-6|PAPER Thu-3-1-6 — Prediction of Sleepiness Ratings from Voice by Man and Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prediction of Sleepiness Ratings from Voice by Man and Machine</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1788.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-5|PAPER Thu-1-11-5 — Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2942.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-7|PAPER Tue-1-10-7 — Lexical Stress in Urdu]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexical Stress in Urdu</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-3|PAPER Thu-2-1-3 — Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2271.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-5|PAPER Wed-1-10-5 — Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1449.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-2|PAPER Wed-1-2-2 — A Low Latency ASR-Free End to End Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Low Latency ASR-Free End to End Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2362.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-6|PAPER Thu-3-2-6 — Evaluating the Reliability of Acoustic Speech Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating the Reliability of Acoustic Speech Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-10|PAPER Wed-2-2-10 — Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Training for End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2444.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-4|PAPER Tue-1-9-4 — The MSP-Conversation Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The MSP-Conversation Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3042.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-1|PAPER Tue-1-4-1 — Attention to Indexical Information Improves Voice Recall]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention to Indexical Information Improves Voice Recall</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3095.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-6|PAPER Wed-1-10-6 — Bilingual Acoustic Voice Variation is Similarly Structured Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bilingual Acoustic Voice Variation is Similarly Structured Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Disfluency in Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1303.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-4|PAPER Mon-3-4-4 — Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2721.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-7|PAPER Wed-SS-1-6-7 — Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1619.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-8|PAPER Thu-1-3-8 — Insertion-Based Modeling for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Insertion-Based Modeling for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2868.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-8|PAPER Wed-1-7-8 — Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-14|PAPER Wed-SS-1-6-14 — Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2793.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-7|PAPER Mon-3-9-7 — Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-6|PAPER Wed-3-5-6 — Compact Speaker Embedding: lrx-Vector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compact Speaker Embedding: lrx-Vector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2476.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-8|PAPER Mon-3-4-8 — Attention-Driven Projections for Soundscape Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Driven Projections for Soundscape Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-4|PAPER Thu-3-7-4 — Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1627.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-9|PAPER Tue-1-9-9 — An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-5|PAPER Thu-3-4-5 — Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1420.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-4|PAPER Mon-2-10-4 — Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Analysis of Speech Prosody in Dutch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-6|PAPER Mon-3-7-6 — Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-4|PAPER Wed-1-10-4 — Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2587.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-5|PAPER Wed-SS-1-6-5 — Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Embedding from Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-5|PAPER Thu-SS-2-5-5 — x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-6|PAPER Thu-SS-2-5-6 — Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3000.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-10|PAPER Thu-3-7-10 — Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2183.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1246.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-7|PAPER Mon-2-9-7 — Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-8|PAPER Wed-3-1-8 — Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-4|PAPER Wed-3-1-4 — Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3166.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-2|PAPER Thu-2-3-2 — Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-6|PAPER Thu-3-8-6 — Language Model Data Augmentation Based on Text Domain Transfer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Model Data Augmentation Based on Text Domain Transfer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-8|PAPER Mon-2-12-8 — End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4002.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-1|PAPER Mon-2-12-1 — Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2596.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-9|PAPER Mon-1-5-9 — Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2480.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-6|PAPER Wed-3-12-6 — Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1685.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-10|PAPER Wed-1-10-10 — The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-3|PAPER Thu-1-5-3 — Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-11|PAPER Wed-SS-1-6-11 — Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Study of Speech Anonymization Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-7|PAPER Mon-3-10-7 — Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2507.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-2|PAPER Wed-2-1-2 — Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2456.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-4|PAPER Mon-2-9-4 — Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-7|PAPER Mon-3-1-7 — Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1863.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-9|PAPER Wed-1-5-9 — High Performance Sequence-to-Sequence Model for Streaming Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Performance Sequence-to-Sequence Model for Streaming Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1869.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-1|PAPER Mon-2-1-1 — Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-4|PAPER Mon-2-1-4 — An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2531.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-1|PAPER Thu-3-1-1 — Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-6|PAPER Mon-3-7-6 — Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2102.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-8|PAPER Tue-1-5-8 — FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Prosody Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-9|PAPER Mon-2-4-9 — Modeling Global Body Configurations in American Sign Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Global Body Configurations in American Sign Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-13|PAPER Wed-SS-1-4-13 — Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1323.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-1|PAPER Wed-3-10-1 — Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1434.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-3|PAPER Wed-1-7-3 — Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-4|PAPER Tue-1-5-4 — Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-5|PAPER Wed-3-7-5 — Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2757.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-10|PAPER Wed-3-2-10 — All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2928.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-4|PAPER Thu-3-10-4 — Transformer-Based Long-Context End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer-Based Long-Context End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2631.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-4|PAPER Wed-SS-1-12-4 — Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-1|PAPER Wed-2-4-1 — Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-8|PAPER Thu-2-9-8 — Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Keyword Spotting on Mobile Devices</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3122.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-8|PAPER Thu-2-11-8 — Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2462.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-8|PAPER Thu-3-11-8 — Instantaneous Time Delay Estimation of Broadband Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Time Delay Estimation of Broadband Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2445.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-9|PAPER Mon-3-11-9 — Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2566.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-9|PAPER Thu-2-9-9 — Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-6|PAPER Wed-1-3-6 — CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-1|PAPER Mon-1-12-1 — ICE-Talk: An Interface for a Controllable Expressive Talking Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ICE-Talk: An Interface for a Controllable Expressive Talking Machine</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-3|PAPER Wed-3-10-3 — Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-12|PAPER Wed-SS-1-6-12 — Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-1|PAPER Mon-1-3-1 — Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-9|PAPER Wed-1-8-9 — Evaluating Automatically Generated Phoneme Captions for Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Automatically Generated Phoneme Captions for Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1170.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-5|PAPER Wed-2-6-5 — Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1598.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-10|PAPER Thu-3-6-10 — Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-4|PAPER Thu-1-9-4 — End-to-End Task-Oriented Dialog System Through Template Slot Value Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Task-Oriented Dialog System Through Template Slot Value Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-9|PAPER Thu-3-7-9 — Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1303.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-4|PAPER Mon-3-4-4 — Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-4|PAPER Wed-3-8-4 — The Method of Random Directions Optimization for Stereo Audio Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Method of Random Directions Optimization for Stereo Audio Source Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1611.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-8|PAPER Tue-1-9-8 — Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1607.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-2|PAPER Tue-1-10-2 — Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-12|PAPER Thu-3-7-12 — Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1434.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-3|PAPER Wed-1-7-3 — Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-10|PAPER Wed-3-4-10 — Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2892.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-7|PAPER Thu-2-7-7 — A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-4|PAPER Mon-3-9-4 — Exploring Listeners’ Speech Rate Preferences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Listeners’ Speech Rate Preferences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2679.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-10|PAPER Wed-2-11-10 — One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2867.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-10|PAPER Thu-1-1-10 — SpeedySpeech: Efficient Neural Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeedySpeech: Efficient Neural Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1399.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-1|PAPER Mon-3-5-1 — Singing Synthesis: With a Little Help from my Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Synthesis: With a Little Help from my Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-3|PAPER Thu-2-8-3 — Class LM and Word Mapping for Contextual Biasing in End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class LM and Word Mapping for Contextual Biasing in End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1771.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-8|PAPER Mon-2-1-8 — Emotion Profile Refinery for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Profile Refinery for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1762.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-7|PAPER Wed-1-9-7 — EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1779.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-8|PAPER Wed-1-9-8 — Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-6|PAPER Thu-3-9-6 — Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2299.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-2|PAPER Thu-3-3-2 — Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2476.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-8|PAPER Mon-3-4-8 — Attention-Driven Projections for Soundscape Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Driven Projections for Soundscape Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-4|PAPER Tue-1-5-4 — Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2655.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-9|PAPER Mon-3-4-9 — Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-13|PAPER Thu-3-7-13 — Glottal Closure Instants Detection from EGG Signal by Classification Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from EGG Signal by Classification Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-3|PAPER Mon-2-5-3 — Mixed Case Contextual ASR Using Capitalization Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixed Case Contextual ASR Using Capitalization Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-1|PAPER Mon-3-2-1 — Multi-Task Siamese Neural Network for Improving Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Siamese Neural Network for Improving Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-12|PAPER Wed-SS-1-6-12 — Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2224.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-4|PAPER Thu-2-1-4 — Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2456.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-4|PAPER Mon-2-9-4 — Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1269.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-1|PAPER Thu-2-1-1 — Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Alternative to MFCCs for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-2|PAPER Wed-SS-3-11-2 — Deep Embedding Learning for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Embedding Learning for Text-Dependent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1192.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-2|PAPER Mon-2-5-2 — CAM: Uninteresting Speech Detector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAM: Uninteresting Speech Detector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1761.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-6|PAPER Wed-2-2-6 — Deep Template Matching for Small-Footprint and Configurable Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Template Matching for Small-Footprint and Configurable Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-2|PAPER Wed-SS-3-11-2 — Deep Embedding Learning for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Embedding Learning for Text-Dependent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-4|PAPER Thu-SS-2-5-4 — Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-6|PAPER Mon-1-9-6 — Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1606.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-7|PAPER Mon-1-8-7 — Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-2|PAPER Wed-SS-2-3-2 — Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3122.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-8|PAPER Thu-2-11-8 — Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-3|PAPER Mon-2-5-3 — Mixed Case Contextual ASR Using Capitalization Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixed Case Contextual ASR Using Capitalization Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0017.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-8|PAPER Mon-2-11-8 — On the Robustness and Training Dynamics of Raw Waveform Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Robustness and Training Dynamics of Raw Waveform Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-1|PAPER Tue-1-5-1 — Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-3|PAPER Tue-1-5-3 — A Deep 2D Convolutional Network for Waveform-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep 2D Convolutional Network for Waveform-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-4|PAPER Thu-1-7-4 — Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-8|PAPER Mon-2-3-8 — An Evaluation of Manual and Semi-Automatic Laughter Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of Manual and Semi-Automatic Laughter Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1954.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-7|PAPER Wed-1-1-7 — Variation in Spectral Slope and Interharmonic Noise in Cantonese Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variation in Spectral Slope and Interharmonic Noise in Cantonese Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-7|PAPER Wed-3-5-7 — Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-1|PAPER Tue-1-10-1 — Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-6|PAPER Mon-2-9-6 — Improving End-to-End Speech-to-Intent Classification with Reptile]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving End-to-End Speech-to-Intent Classification with Reptile</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2892.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-7|PAPER Thu-2-7-7 — A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-4|PAPER Wed-3-7-4 — Deep Speech Inpainting of Time-Frequency Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speech Inpainting of Time-Frequency Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-6|PAPER Thu-3-3-6 — Detecting Audio Attacks on ASR Systems with Dropout Uncertainty]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Audio Attacks on ASR Systems with Dropout Uncertainty</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1586.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-9|PAPER Thu-3-8-9 — Language Modeling for Speech Analytics in Under-Resourced Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling for Speech Analytics in Under-Resourced Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2567.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-8|PAPER Tue-1-7-8 — An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1987.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-6|PAPER Wed-1-1-6 — Pitch Declination and Final Lowering in Northeastern Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pitch Declination and Final Lowering in Northeastern Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1274.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-8|PAPER Wed-1-1-8 — The Acoustic Realization of Mandarin Tones in Fast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Acoustic Realization of Mandarin Tones in Fast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-5|PAPER Wed-2-9-5 — StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-9|PAPER Wed-3-7-9 — Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Embedding from Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3000.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-10|PAPER Thu-3-7-10 — Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1607.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-2|PAPER Tue-1-10-2 — Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1313.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-1|PAPER Thu-1-9-1 — Stochastic Curiosity Exploration for Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Curiosity Exploration for Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1736.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-5|PAPER Mon-1-5-5 — WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1446.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-6|PAPER Mon-2-10-6 — Self-Attention Encoding and Pooling for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Encoding and Pooling for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-2|PAPER Thu-2-6-2 — Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition of Compound-Rich Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-7|PAPER Mon-1-7-7 — Deep Self-Supervised Hierarchical Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Self-Supervised Hierarchical Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-8|PAPER Wed-3-4-8 — Learning Joint Articulatory-Acoustic Representations with Normalizing Flows]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Joint Articulatory-Acoustic Representations with Normalizing Flows</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-7|PAPER Thu-3-11-7 — Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1222.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-3|PAPER Mon-3-10-3 — Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-7|PAPER Mon-3-10-7 — Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2709.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-8|PAPER Mon-3-10-8 — An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-4|PAPER Wed-2-10-4 — Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-7|PAPER Wed-2-10-7 — Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3217.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-10|PAPER Wed-2-10-10 — Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-8|PAPER Mon-3-2-8 — Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2699.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-9|PAPER Thu-2-7-9 — Neural PLDA Modeling for End-to-End Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural PLDA Modeling for End-to-End Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-8|PAPER Mon-3-7-8 — Black-Box Adaptation of ASR for Accented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Adaptation of ASR for Accented Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-9|PAPER Thu-3-5-9 — Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2276.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-6|PAPER Wed-3-3-6 — Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-1|PAPER Mon-1-3-1 — Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2695.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-1|PAPER Wed-1-1-1 — Interaction of Tone and Voicing in Mizo]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interaction of Tone and Voicing in Mizo</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-9|PAPER Wed-2-1-9 — Towards Automatic Assessment of Voice Disorders: A Clinical Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Automatic Assessment of Voice Disorders: A Clinical Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-2|PAPER Tue-1-5-2 — Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1606.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-7|PAPER Mon-1-8-7 — Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1187.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-2|PAPER Mon-3-10-2 — Improving the Performance of Acoustic-to-Articulatory Inversion by Removing the Training Loss of Noncritical Portions of Articulatory Channels Dynamically]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Performance of Acoustic-to-Articulatory Inversion by Removing the Training Loss of Noncritical Portions of Articulatory Channels Dynamically</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1772.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-8|PAPER Tue-1-2-8 — Speaker Re-Identification with Speaker Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Re-Identification with Speaker Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1774.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-4|PAPER Wed-2-12-4 — Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1885.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-4|PAPER Thu-3-2-4 — Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Training for End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-8|PAPER Wed-3-1-8 — Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-4|PAPER Thu-SS-2-5-4 — Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1908.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-5|PAPER Mon-1-7-5 — Self-Attentive Similarity Measurement Strategies in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attentive Similarity Measurement Strategies in Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1436.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-2|PAPER Mon-3-11-2 — Atss-Net: Target Speaker Separation via Attention-Based Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Atss-Net: Target Speaker Separation via Attention-Based Neural Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1915.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-3|PAPER Wed-SS-2-3-3 — The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2507.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-2|PAPER Wed-2-1-2 — Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1704.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-3|PAPER Thu-3-10-3 — Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-9|PAPER Thu-1-11-9 — Attention Forcing for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Forcing for Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-2|PAPER Wed-2-4-2 — On Synthesis for Supervised Monaural Speech Separation in Time Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Synthesis for Supervised Monaural Speech Separation in Time Domain</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2205.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-6|PAPER Wed-2-4-6 — Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-3|PAPER Mon-2-5-3 — Mixed Case Contextual ASR Using Capitalization Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixed Case Contextual ASR Using Capitalization Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1676.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-4|PAPER Tue-1-3-4 — Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-10|PAPER Thu-1-4-10 — Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2091.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-1|PAPER Wed-SS-1-12-1 — Online Monaural Speech Enhancement Using Delayed Subband LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Monaural Speech Enhancement Using Delayed Subband LSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-13|PAPER Wed-SS-1-4-13 — Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2587.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-5|PAPER Wed-SS-1-6-5 — Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Based Wakeword-Independent Verification System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Robust Word-Level Wakeword Verification Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1794.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-2|PAPER Mon-1-4-2 — Poetic Meter Classification Using i-Vector-MTF Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Poetic Meter Classification Using i-Vector-MTF Fusion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-5|PAPER Mon-3-2-5 — Competency Evaluation in Voice Mimicking Using Acoustic Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competency Evaluation in Voice Mimicking Using Acoustic Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-4|PAPER Mon-3-8-4 — Stochastic Talking Face Generation Using Latent Distribution Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Talking Face Generation Using Latent Distribution Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2482.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-6|PAPER Thu-2-6-6 — End-to-End Named Entity Recognition from English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Named Entity Recognition from English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1958.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-1|PAPER Tue-1-8-1 — Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-7|PAPER Tue-1-8-7 — LVCSR with Transformer Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LVCSR with Transformer Language Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1855.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-6|PAPER Wed-2-8-6 — A New Training Pipeline for an Improved Neural Transducer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Training Pipeline for an Improved Neural Transducer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2675.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-6|PAPER Thu-1-2-6 — Early Stage LM Integration Using Local and Global Log-Linear Combination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Stage LM Integration Using Local and Global Log-Linear Combination</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1244.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-9|PAPER Thu-2-8-9 — Context-Dependent Acoustic Modeling Without Explicit Phone Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Acoustic Modeling Without Explicit Phone Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-10|PAPER Thu-2-9-10 — Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2224.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-4|PAPER Thu-2-1-4 — Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-1|PAPER Mon-1-11-1 — Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2880.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-10|PAPER Wed-2-1-10 — BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-9|PAPER Wed-2-10-9 — Surfboard: Audio Feature Extraction for Modern Machine Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surfboard: Audio Feature Extraction for Modern Machine Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-6|PAPER Thu-3-9-6 — Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1323.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-1|PAPER Wed-3-10-1 — Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-2|PAPER Wed-3-10-2 — Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-4|PAPER Mon-3-8-4 — Stochastic Talking Face Generation Using Latent Distribution Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Talking Face Generation Using Latent Distribution Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-3|PAPER Tue-1-10-3 — Scaling Processes of Clause Chains in Pitjantjatjara]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Processes of Clause Chains in Pitjantjatjara</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-1|PAPER Mon-2-4-1 — Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1252.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-1|PAPER Wed-SS-2-3-1 — Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-1|PAPER Mon-2-4-1 — Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-7|PAPER Mon-3-10-7 — Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-4|PAPER Wed-2-10-4 — Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-2|PAPER Mon-2-7-2 — Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2729.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-8|PAPER Wed-SS-1-6-8 — Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2236.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-5|PAPER Mon-1-4-5 — Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1878.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-4|PAPER Wed-1-2-4 — Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-4|PAPER Wed-3-1-4 — Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2383.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-2|PAPER Wed-SS-2-7-2 — The “Sound of Silence” in EEG — Cognitive Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The “Sound of Silence” in EEG — Cognitive Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-13|PAPER Wed-3-10-13 — The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1212.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-1|PAPER Thu-SS-1-6-1 — Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1437.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-5|PAPER Thu-1-1-5 — StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-6|PAPER Thu-2-9-6 — Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1598.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-10|PAPER Thu-3-6-10 — Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3095.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-6|PAPER Wed-1-10-6 — Bilingual Acoustic Voice Variation is Similarly Structured Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bilingual Acoustic Voice Variation is Similarly Structured Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2983.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-6|PAPER Tue-1-1-6 — Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2655.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-9|PAPER Mon-3-4-9 — Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Federated Approach in Training Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-12|PAPER Wed-3-10-12 — GAN-Based Data Generation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAN-Based Data Generation for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-3|PAPER Wed-SS-1-4-3 — Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Soapbox Labs Verification Platform for Child Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2983.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-6|PAPER Tue-1-1-6 — Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1627.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-9|PAPER Tue-1-9-9 — An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2362.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-6|PAPER Thu-3-2-6 — Evaluating the Reliability of Acoustic Speech Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating the Reliability of Acoustic Speech Embeddings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-6|PAPER Wed-3-8-6 — Generalized Minimal Distortion Principle for Blind Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generalized Minimal Distortion Principle for Blind Source Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1168.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-1|PAPER Thu-3-11-1 — Sparseness-Aware DOA Estimation with Majorization Minimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sparseness-Aware DOA Estimation with Majorization Minimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-2|PAPER Thu-2-2-2 — Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1810.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-6|PAPER Mon-3-2-6 — Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1814.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-4|PAPER Wed-1-7-4 — Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-1|PAPER Thu-SS-2-5-1 — The Attacker’s Perspective on Automatic Speaker Verification: An Overview]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Attacker’s Perspective on Automatic Speaker Verification: An Overview</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-10|PAPER Tue-1-5-10 — Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1497.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-3|PAPER Mon-1-8-3 — Anti-Aliasing Regularization in Stacking Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Aliasing Regularization in Stacking Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">New Advances in Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-7|PAPER Tue-1-2-7 — Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-5|PAPER Wed-3-10-5 — Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-3|PAPER Thu-2-8-3 — Class LM and Word Mapping for Contextual Biasing in End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class LM and Word Mapping for Contextual Biasing in End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-8|PAPER Wed-3-5-8 — Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-5|PAPER Thu-2-7-5 — Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-2|PAPER Thu-2-2-2 — Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-8|PAPER Thu-2-2-8 — Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2665.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-6|PAPER Wed-2-9-6 — An Open Source Implementation of ITU-T Recommendation P.808 with Validation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open Source Implementation of ITU-T Recommendation P.808 with Validation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-1|PAPER Wed-1-10-1 — The Effect of Language Proficiency on the Perception of Segmental Foreign Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Language Proficiency on the Perception of Segmental Foreign Accent</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-1|PAPER Thu-2-11-1 — Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1814.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-4|PAPER Wed-1-7-4 — Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-1|PAPER Wed-2-5-1 — Adversarial Latent Representation Learning for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Latent Representation Learning for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-5|PAPER Wed-2-5-5 — Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-6|PAPER Wed-2-11-6 — Tone Learning in Low-Resource Bilingual TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Learning in Low-Resource Bilingual TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emitting Word Timings with End-to-End Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2654.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-7|PAPER Wed-2-11-7 — On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3115.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-9|PAPER Wed-2-4-9 — Unsupervised Audio Source Separation Using Generative Priors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Audio Source Separation Using Generative Priors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-7|PAPER Tue-1-3-7 — Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2267.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-4|PAPER Thu-3-6-4 — Dysarthric Speech Recognition Based on Deep Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthric Speech Recognition Based on Deep Metric Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-10|PAPER Mon-3-7-10 — Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-10|PAPER Thu-1-4-10 — Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-4|PAPER Thu-1-4-4 — A Noise Robust Technique for Detecting Vowels in Speech Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise Robust Technique for Detecting Vowels in Speech Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2326.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-6|PAPER Thu-1-4-6 — VOP Detection in Variable Speech Rate Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VOP Detection in Variable Speech Rate Condition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-6|PAPER Mon-3-4-6 — Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1140.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-2|PAPER Wed-2-10-2 — Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-10|PAPER Tue-1-9-10 — How Ordinal Are Your Data?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Ordinal Are Your Data?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2110.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-7|PAPER Wed-2-6-7 — Neural Speech Completion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Completion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-1|PAPER Wed-SS-2-7-1 — Combining Audio and Brain Activity for Predicting Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Audio and Brain Activity for Predicting Speech Quality</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-5|PAPER Thu-3-7-5 — Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-1|PAPER Thu-3-8-1 — Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-10|PAPER Wed-SS-1-4-10 — Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-1|PAPER Mon-3-7-1 — Continual Learning in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continual Learning in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1449.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-2|PAPER Wed-1-2-2 — A Low Latency ASR-Free End to End Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Low Latency ASR-Free End to End Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1575.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-9|PAPER Mon-1-1-9 — Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-7|PAPER Wed-3-12-7 — Resource-Adaptive Deep Learning for Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Resource-Adaptive Deep Learning for Visual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-2|PAPER Thu-3-5-2 — Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-9|PAPER Wed-3-2-9 — Detecting and Counting Overlapping Speakers in Distant Speech Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Counting Overlapping Speakers in Distant Speech Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2654.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-7|PAPER Wed-2-11-7 — On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-4|PAPER Thu-1-1-4 — Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2440.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-10|PAPER Mon-3-1-10 — Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-5|PAPER Tue-1-8-5 — Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-5|PAPER Mon-2-2-5 — PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Alternative to MFCCs for ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-1|PAPER Thu-1-3-1 — Neural Language Modeling with Implicit Cache Pointers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Language Modeling with Implicit Cache Pointers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-4|PAPER Tue-1-2-4 — Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2836.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-1|PAPER Wed-2-1-1 — The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1459.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-8|PAPER Thu-3-6-8 — Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2320.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-2|PAPER Mon-1-9-2 — Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-2|PAPER Wed-2-9-2 — Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2110.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-7|PAPER Wed-2-6-7 — Neural Speech Completion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Completion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-1|PAPER Wed-SS-2-7-1 — Combining Audio and Brain Activity for Predicting Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Audio and Brain Activity for Predicting Speech Quality</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-5|PAPER Thu-3-7-5 — Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-1|PAPER Thu-3-8-1 — Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3000.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-10|PAPER Thu-3-7-10 — Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-12|PAPER Thu-3-7-12 — Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-1|PAPER Tue-1-7-1 — g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2382.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-6|PAPER Tue-1-7-6 — Deep Learning Based Assessment of Synthetic Speech Naturalness]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Assessment of Synthetic Speech Naturalness</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1863.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-9|PAPER Wed-1-5-9 — High Performance Sequence-to-Sequence Model for Streaming Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Performance Sequence-to-Sequence Model for Streaming Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1431.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-7|PAPER Thu-3-9-7 — Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2596.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-9|PAPER Mon-1-5-9 — Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-4|PAPER Thu-1-7-4 — Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Federated Approach in Training Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-12|PAPER Wed-3-10-12 — GAN-Based Data Generation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAN-Based Data Generation for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-2|PAPER Mon-2-1-2 — End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-6|PAPER Mon-3-10-6 — Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-1|PAPER Wed-SS-1-6-1 — Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-4|PAPER Thu-1-1-4 — Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-5|PAPER Thu-3-4-5 — Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2312.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-2|PAPER Wed-2-6-2 — Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-4|PAPER Wed-1-3-4 — Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-4|PAPER Wed-1-3-4 — Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-4|PAPER Thu-1-7-4 — Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-13|PAPER Wed-3-10-13 — The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1889.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-8|PAPER Thu-3-4-8 — Improved Zero-Shot Voice Conversion Using Explicit Conditioning Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Zero-Shot Voice Conversion Using Explicit Conditioning Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-11|PAPER Wed-SS-1-6-11 — Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-7|PAPER Tue-1-2-7 — Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1212.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-1|PAPER Thu-SS-1-6-1 — Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-4|PAPER Mon-1-11-4 — What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2271.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-5|PAPER Wed-1-10-5 — Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2828.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-6|PAPER Mon-3-1-6 — Multilingual Jointly Trained Acoustic and Written Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Jointly Trained Acoustic and Written Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-2|PAPER Thu-2-4-2 — An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1865.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-3|PAPER Thu-1-9-3 — Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1617.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-3|PAPER Mon-3-3-3 — Lite Audio-Visual Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lite Audio-Visual Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1274.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-8|PAPER Wed-1-1-8 — The Acoustic Realization of Mandarin Tones in Fast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Acoustic Realization of Mandarin Tones in Fast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-2|PAPER Mon-2-7-2 — Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1806.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-4|PAPER Mon-3-5-4 — Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-1|PAPER Wed-2-11-1 — Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1518.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-9|PAPER Thu-2-4-9 — Detecting Domain-Specific Credibility and Expertise in Text and Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Domain-Specific Credibility and Expertise in Text and Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-7|PAPER Mon-3-11-7 — Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-2|PAPER Wed-3-1-2 — An Effective End-to-End Modeling Approach for Mispronunciation Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective End-to-End Modeling Approach for Mispronunciation Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2471.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-2|PAPER Mon-1-1-2 — SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-3|PAPER Wed-3-7-3 — Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-9|PAPER Wed-3-10-9 — Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Embedding from Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1619.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-8|PAPER Thu-1-3-8 — Insertion-Based Modeling for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Insertion-Based Modeling for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-6|PAPER Mon-2-12-6 — Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2347.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-8|PAPER Thu-1-11-8 — End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1195.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-6|PAPER Wed-2-6-6 — Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Audio-Based Wakeword-Independent Verification System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-7|PAPER Mon-1-9-7 — Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-3|PAPER Thu-2-1-3 — Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1484.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-3|PAPER Mon-1-2-3 — Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-9|PAPER Mon-1-8-9 — Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-7|PAPER Tue-1-3-7 — Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1969.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-4|PAPER Wed-3-2-4 — Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2585.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-8|PAPER Wed-3-2-8 — Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2699.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-9|PAPER Thu-2-7-9 — Neural PLDA Modeling for End-to-End Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural PLDA Modeling for End-to-End Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1841.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-4|PAPER Mon-2-3-4 — Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1269.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-1|PAPER Thu-2-1-1 — Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2819.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-3|PAPER Thu-3-1-3 — Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-7|PAPER Thu-SS-1-6-7 — Understanding Self-Attention of Self-Supervised Audio Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding Self-Attention of Self-Supervised Audio Transformers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-6|PAPER Wed-1-7-6 — Multi-Modality Matters: A Performance Leap on VoxCeleb]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modality Matters: A Performance Leap on VoxCeleb</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-9|PAPER Wed-2-12-9 — Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2985.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-12|PAPER Thu-2-9-12 — Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2654.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-7|PAPER Wed-2-11-7 — On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1771.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-8|PAPER Mon-2-1-8 — Emotion Profile Refinery for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Profile Refinery for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1762.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-7|PAPER Wed-1-9-7 — EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1779.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-8|PAPER Wed-1-9-8 — Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-5|PAPER Wed-3-2-5 — Towards Speech Robustness for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Speech Robustness for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-4|PAPER Thu-3-11-4 — Online Blind Reverberation Time Estimation Using CRNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Blind Reverberation Time Estimation Using CRNNs</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2171.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-5|PAPER Thu-3-11-5 — Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-4|PAPER Mon-2-5-4 — Speech Recognition and Multi-Speaker Diarization of Long Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Recognition and Multi-Speaker Diarization of Long Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2549.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-10|PAPER Thu-2-3-10 — F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2145.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-6|PAPER Thu-2-10-6 — Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-8|PAPER Wed-3-4-8 — Learning Joint Articulatory-Acoustic Representations with Normalizing Flows]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Joint Articulatory-Acoustic Representations with Normalizing Flows</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-1|PAPER Mon-2-9-1 — End-to-End Neural Transformer Based Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Transformer Based Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-8|PAPER Thu-2-10-8 — Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2748.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-6|PAPER Mon-3-9-6 — A Sound Engineering Approach to Near End Listening Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Sound Engineering Approach to Near End Listening Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2567.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-8|PAPER Tue-1-7-8 — An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-10|PAPER Wed-3-4-10 — Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-10|PAPER Thu-1-11-10 — Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1569.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-5|PAPER Thu-1-2-5 — Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition of Compound-Rich Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-8|PAPER Thu-2-4-8 — Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1586.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-9|PAPER Thu-3-8-9 — Language Modeling for Speech Analytics in Under-Resourced Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling for Speech Analytics in Under-Resourced Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2734.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-4|PAPER Thu-3-3-4 — Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1306.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-3|PAPER Mon-2-10-3 — Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-11|PAPER Thu-3-4-11 — Voice Conversion Using Speech-to-Speech Neuro-Style Transfer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Using Speech-to-Speech Neuro-Style Transfer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1820.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-6|PAPER Thu-2-4-6 — Dimensional Emotion Prediction Based on Interactive Context in Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensional Emotion Prediction Based on Interactive Context in Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-1|PAPER Mon-2-8-1 — Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-1|PAPER Mon-2-5-1 — Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1170.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-5|PAPER Wed-2-6-5 — Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-5|PAPER Wed-3-10-5 — Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-1|PAPER Wed-2-9-1 — Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2299.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-2|PAPER Thu-3-3-2 — Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1081.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-3|PAPER Wed-2-1-3 — Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-2|PAPER Mon-3-1-2 — Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2856.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-5|PAPER Mon-3-1-5 — Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1058.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-3|PAPER Wed-3-9-3 — MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-3|PAPER Thu-3-10-3 — Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2075.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-5|PAPER Wed-3-5-5 — Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1065.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-1|PAPER Wed-3-12-1 — FaceFilter: Audio-Visual Speech Separation Using Still Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FaceFilter: Audio-Visual Speech Separation Using Still Images</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1113.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-2|PAPER Wed-3-12-2 — Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2076.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-3|PAPER Thu-2-7-3 — MIRNet: Learning Multiple Identities Representations in Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MIRNet: Learning Multiple Identities Representations in Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1263.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-5|PAPER Tue-1-4-5 — Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1065.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-1|PAPER Wed-3-12-1 — FaceFilter: Audio-Visual Speech Separation Using Still Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FaceFilter: Audio-Visual Speech Separation Using Still Images</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1936.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-4|PAPER Thu-2-3-4 — Malayalam-English Code-Switched: Grapheme to Phoneme System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Malayalam-English Code-Switched: Grapheme to Phoneme System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1936.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-4|PAPER Thu-2-3-4 — Malayalam-English Code-Switched: Grapheme to Phoneme System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Malayalam-English Code-Switched: Grapheme to Phoneme System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2482.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-6|PAPER Thu-2-6-6 — End-to-End Named Entity Recognition from English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Named Entity Recognition from English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-2|PAPER Thu-1-2-2 — Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-4|PAPER Thu-1-7-4 — Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1813.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-5|PAPER Mon-2-9-5 — Context Dependent RNNLM for Automatic Transcription of Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context Dependent RNNLM for Automatic Transcription of Conversations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-7|PAPER Mon-1-9-7 — Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-7|PAPER Mon-1-7-7 — Deep Self-Supervised Hierarchical Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Self-Supervised Hierarchical Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1813.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-5|PAPER Mon-2-9-5 — Context Dependent RNNLM for Automatic Transcription of Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context Dependent RNNLM for Automatic Transcription of Conversations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-2|PAPER Tue-1-5-2 — Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-10|PAPER Tue-1-5-10 — Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2699.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-9|PAPER Thu-2-7-9 — Neural PLDA Modeling for End-to-End Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural PLDA Modeling for End-to-End Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2674.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-3|PAPER Thu-2-10-3 — Audiovisual Correspondence Learning in Humans and Machines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audiovisual Correspondence Learning in Humans and Machines</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-8|PAPER Thu-1-8-8 — Domain Adversarial Neural Networks for Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adversarial Neural Networks for Dysarthric Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-3|PAPER Mon-3-11-3 — Multimodal Target Speech Separation with Voice and Face References]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Target Speech Separation with Voice and Face References</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-9|PAPER Wed-3-2-9 — Detecting and Counting Overlapping Speakers in Distant Speech Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting and Counting Overlapping Speakers in Distant Speech Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-3|PAPER Wed-SS-1-4-3 — Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2357.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-7|PAPER Mon-2-3-7 — Word Error Rate Estimation Without ASR Output: e-WER2]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Word Error Rate Estimation Without ASR Output: e-WER2</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0017.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-8|PAPER Mon-2-11-8 — On the Robustness and Training Dynamics of Raw Waveform Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Robustness and Training Dynamics of Raw Waveform Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-1|PAPER Tue-1-5-1 — Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-3|PAPER Tue-1-5-3 — A Deep 2D Convolutional Network for Waveform-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep 2D Convolutional Network for Waveform-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-4|PAPER Wed-SS-2-3-4 — “This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1784.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-2|PAPER Thu-1-3-2 — Finnish ASR with Deep Transformer Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finnish ASR with Deep Transformer Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1825.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-9|PAPER Thu-3-6-9 — Recognising Emotions in Dysarthric Speech Using Typical Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognising Emotions in Dysarthric Speech Using Typical Speech Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-6|PAPER Thu-3-9-6 — Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-10|PAPER Wed-1-3-10 — A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1551.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-7|PAPER Wed-2-5-7 — A Deep Learning-Based Kalman Filter for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning-Based Kalman Filter for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-7|PAPER Wed-3-10-7 — Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2183.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-9|PAPER Mon-2-1-9 — Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">In Defence of Metric Learning for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1341.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-8|PAPER Thu-1-9-8 — Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-4|PAPER Wed-1-3-4 — Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2836.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-1|PAPER Wed-2-1-1 — The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-5|PAPER Wed-2-10-5 — Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1578.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-3|PAPER Thu-3-11-3 — Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-8|PAPER Mon-3-7-8 — Black-Box Adaptation of ASR for Accented Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Adaptation of ASR for Accented Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1356.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-7|PAPER Mon-2-1-7 — Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1212.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-1|PAPER Thu-SS-1-6-1 — Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-4|PAPER Mon-1-11-4 — What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1996.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-5|PAPER Wed-1-7-5 — Multimodal Association for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Association for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-3|PAPER Thu-2-3-3 — Rhythmic Convergence in Canadian French Varieties?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rhythmic Convergence in Canadian French Varieties?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-2|PAPER Mon-3-9-2 — iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2757.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-10|PAPER Wed-3-2-10 — All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2928.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-4|PAPER Thu-3-10-4 — Transformer-Based Long-Context End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer-Based Long-Context End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-6|PAPER Mon-2-12-6 — Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-8|PAPER Mon-2-12-8 — End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-7|PAPER Mon-3-11-7 — Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-5|PAPER Wed-3-1-5 — Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1575.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-9|PAPER Mon-1-1-9 — Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3127.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-6|PAPER Thu-3-7-6 — Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-1|PAPER Wed-3-8-1 — Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1159.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-1|PAPER Mon-3-10-1 — Two Different Mechanisms of Movable Mandible for Vocal-Tract Model with Flexible Tongue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Different Mechanisms of Movable Mandible for Vocal-Tract Model with Flexible Tongue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-3|PAPER Wed-1-9-3 — Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-6|PAPER Wed-1-3-6 — CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-3|PAPER Wed-1-9-3 — Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-10|PAPER Wed-2-2-10 — Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-9|PAPER Wed-3-10-9 — Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1089.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-8|PAPER Mon-1-8-8 — Neural Speech Separation Using Spatially Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Separation Using Spatially Distributed Microphones</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-3|PAPER Wed-2-6-3 — Speaker Dependent Articulatory-to-Acoustic Mapping Using Real-Time MRI of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Dependent Articulatory-to-Acoustic Mapping Using Real-Time MRI of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-2|PAPER Thu-1-5-2 — Speaker Dependent Acoustic-to-Articulatory Inversion Using Real-Time MRI of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Dependent Acoustic-to-Articulatory Inversion Using Real-Time MRI of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1672.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-5|PAPER Thu-1-5-5 — Quantification of Transducer Misalignment in Ultrasound Tongue Imaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantification of Transducer Misalignment in Ultrasound Tongue Imaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1771.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-8|PAPER Mon-2-1-8 — Emotion Profile Refinery for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Profile Refinery for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-9|PAPER Mon-2-10-9 — Text-Independent Speaker Verification with Dual Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Text-Independent Speaker Verification with Dual Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1762.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-7|PAPER Wed-1-9-7 — EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1779.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-8|PAPER Wed-1-9-8 — Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-8|PAPER Wed-3-10-8 — Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2145.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-6|PAPER Thu-2-10-6 — Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-7|PAPER Mon-1-11-7 — Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-2|PAPER Mon-3-1-2 — Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2856.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-5|PAPER Mon-3-1-5 — Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2859.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-7|PAPER Thu-1-5-7 — CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2829.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-9|PAPER Thu-SS-1-6-9 — Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1936.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-4|PAPER Thu-2-3-4 — Malayalam-English Code-Switched: Grapheme to Phoneme System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Malayalam-English Code-Switched: Grapheme to Phoneme System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1704.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1824.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-7|PAPER Wed-3-4-7 — Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emitting Word Timings with End-to-End Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2696.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-7|PAPER Thu-2-3-7 — Phonetic Entrainment in Cooperative Dialogues: A Case of Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Entrainment in Cooperative Dialogues: A Case of Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2512.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-8|PAPER Mon-2-7-8 — Attention-Based Speaker Embeddings for One-Shot Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Speaker Embeddings for One-Shot Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-2|PAPER Mon-2-1-2 — End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-7|PAPER Mon-2-2-7 — CTC-Synchronous Training for Monotonic Attention Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CTC-Synchronous Training for Monotonic Attention Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1780.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-7|PAPER Wed-1-5-7 — Enhancing Monotonic Multihead Attention for Streaming ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonic Multihead Attention for Streaming ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1195.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-6|PAPER Wed-2-6-6 — Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2293.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-5|PAPER Thu-2-4-5 — Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4002.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-1|PAPER Mon-2-12-1 — Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-4|PAPER Thu-1-9-4 — End-to-End Task-Oriented Dialog System Through Template Slot Value Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Task-Oriented Dialog System Through Template Slot Value Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-6|PAPER Thu-3-3-6 — Detecting Audio Attacks on ASR Systems with Dropout Uncertainty]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Audio Attacks on ASR Systems with Dropout Uncertainty</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2234.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-8|PAPER Thu-1-10-8 — Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-8|PAPER Mon-1-2-8 — Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-8|PAPER Mon-1-2-8 — Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2267.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-4|PAPER Thu-3-6-4 — Dysarthric Speech Recognition Based on Deep Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthric Speech Recognition Based on Deep Metric Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1863.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-9|PAPER Wed-1-5-9 — High Performance Sequence-to-Sequence Model for Streaming Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Performance Sequence-to-Sequence Model for Streaming Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-2|PAPER Wed-2-2-2 — Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-2|PAPER Wed-2-2-2 — Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-1|PAPER Mon-1-12-1 — ICE-Talk: An Interface for a Controllable Expressive Talking Machine]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ICE-Talk: An Interface for a Controllable Expressive Talking Machine</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-3|PAPER Wed-3-10-3 — Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1399.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-1|PAPER Mon-3-5-1 — Singing Synthesis: With a Little Help from my Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Synthesis: With a Little Help from my Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-9|PAPER Wed-3-5-9 — Neural Discriminant Analysis for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Discriminant Analysis for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1772.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-8|PAPER Tue-1-2-8 — Speaker Re-Identification with Speaker Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Re-Identification with Speaker Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1774.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-4|PAPER Wed-2-12-4 — Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-2|PAPER Thu-2-2-2 — Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-8|PAPER Thu-2-2-8 — Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1885.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-4|PAPER Thu-3-2-4 — Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2739.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-6|PAPER Thu-3-5-6 — Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-8|PAPER Thu-3-7-8 — Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2103.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-6|PAPER Mon-1-5-6 — What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-5|PAPER Thu-3-3-5 — Privacy Guarantees for De-Identifying Text Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy Guarantees for De-Identifying Text Transformations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2833.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-10|PAPER Wed-SS-1-6-10 — The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2729.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-8|PAPER Wed-SS-1-6-8 — Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-7|PAPER Thu-3-11-7 — Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-3|PAPER Thu-1-4-3 — Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-3|PAPER Wed-3-2-3 — Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-2|PAPER Wed-3-1-2 — An Effective End-to-End Modeling Approach for Mispronunciation Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective End-to-End Modeling Approach for Mispronunciation Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2709.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-8|PAPER Mon-3-10-8 — An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2560.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-6|PAPER Mon-1-1-6 — BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|^<div class="cpauthorindexpersoncardpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2588.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-5|PAPER Thu-2-11-5 — Speech Enhancement with Stochastic Temporal Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Stochastic Temporal Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2560.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-6|PAPER Mon-1-1-6 — BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2942.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-7|PAPER Tue-1-10-7 — Lexical Stress in Urdu]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexical Stress in Urdu</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1244.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-9|PAPER Thu-2-8-9 — Context-Dependent Acoustic Modeling Without Explicit Phone Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Acoustic Modeling Without Explicit Phone Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1436.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-2|PAPER Mon-3-11-2 — Atss-Net: Target Speaker Separation via Attention-Based Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Atss-Net: Target Speaker Separation via Attention-Based Neural Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1915.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-3|PAPER Wed-SS-2-3-3 — The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2102.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-8|PAPER Tue-1-5-8 — FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-6|PAPER Wed-3-5-6 — Compact Speaker Embedding: lrx-Vector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compact Speaker Embedding: lrx-Vector</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2872.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-8|PAPER Thu-1-7-8 — Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2722.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-1|PAPER Thu-2-8-1 — State Sequence Pooling Training of Acoustic Models for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State Sequence Pooling Training of Acoustic Models for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-1|PAPER Wed-2-9-1 — Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2299.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-2|PAPER Thu-3-3-2 — Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1437.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-5|PAPER Thu-1-1-5 — StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-6|PAPER Thu-2-9-6 — Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2679.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-10|PAPER Wed-2-11-10 — One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-3|PAPER Wed-3-5-3 — A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-1|PAPER Thu-SS-2-5-1 — The Attacker’s Perspective on Automatic Speaker Verification: An Overview]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Attacker’s Perspective on Automatic Speaker Verification: An Overview</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1090.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-2|PAPER Thu-SS-2-5-2 — Extrapolating False Alarm Rates in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extrapolating False Alarm Rates in Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-5|PAPER Thu-2-7-5 — Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-5|PAPER Mon-1-11-5 — Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-7|PAPER Tue-1-3-7 — Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3167.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-9|PAPER Wed-1-3-9 — Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-3|PAPER Thu-2-1-3 — Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1552.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-2|PAPER Wed-SS-1-4-2 — Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1140.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-2|PAPER Wed-2-10-2 — Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1964.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-3|PAPER Wed-1-3-3 — Complex-Valued Variational Autoencoder: A Novel Deep Generative Model for Direct Representation of Complex Spectra]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complex-Valued Variational Autoencoder: A Novel Deep Generative Model for Direct Representation of Complex Spectra</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-9|PAPER Wed-3-10-9 — Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1081.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-3|PAPER Wed-2-1-3 — Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Disfluency in Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emitting Word Timings with End-to-End Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-9|PAPER Wed-1-10-9 — Now You’re Speaking My Language: Visual Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Now You’re Speaking My Language: Visual Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-1|PAPER Wed-2-11-1 — Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-6|PAPER Mon-3-3-6 — Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-10|PAPER Thu-2-9-10 — Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1878.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-4|PAPER Wed-1-2-4 — Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-6|PAPER Wed-2-12-6 — Unsupervised Training of Siamese Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Training of Siamese Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3019.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-8|PAPER Mon-1-4-8 — Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-12|PAPER Wed-SS-1-6-12 — Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2674.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-3|PAPER Thu-2-10-3 — Audiovisual Correspondence Learning in Humans and Machines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audiovisual Correspondence Learning in Humans and Machines</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-3|PAPER Thu-1-7-3 — Training Speaker Enrollment Models by Network Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Speaker Enrollment Models by Network Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2883.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-10|PAPER Mon-3-3-10 — Large Scale Evaluation of Importance Maps in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Evaluation of Importance Maps in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1988.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-5|PAPER Mon-1-10-5 — Design and Development of a Human-Machine Dialog Corpus for the Automated Assessment of Conversational English Proficiency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design and Development of a Human-Machine Dialog Corpus for the Automated Assessment of Conversational English Proficiency</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1449.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-2|PAPER Wed-1-2-2 — A Low Latency ASR-Free End to End Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Low Latency ASR-Free End to End Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1090.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-2|PAPER Thu-SS-2-5-2 — Extrapolating False Alarm Rates in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extrapolating False Alarm Rates in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-4|PAPER Mon-3-8-4 — Stochastic Talking Face Generation Using Latent Distribution Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Talking Face Generation Using Latent Distribution Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-3|PAPER Thu-2-9-3 — Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-1|PAPER Tue-1-10-1 — Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3122.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-8|PAPER Thu-2-11-8 — Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-4|PAPER Mon-2-8-4 — Memory Controlled Sequential Self Attention for Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Memory Controlled Sequential Self Attention for Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1356.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-7|PAPER Mon-2-1-7 — Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1431.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-7|PAPER Thu-3-9-7 — Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-6|PAPER Mon-1-4-6 — Enhancing Formant Information in Spectrographic Display of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Formant Information in Spectrographic Display of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3115.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-9|PAPER Wed-2-4-9 — Unsupervised Audio Source Separation Using Generative Priors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Audio Source Separation Using Generative Priors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2224.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-4|PAPER Thu-2-1-4 — Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2256.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-6|PAPER Thu-3-11-6 — The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition of Compound-Rich Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-4|PAPER Thu-1-8-4 — Universal Adversarial Attacks on Spoken Language Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Attacks on Spoken Language Assessment Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1977.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-1|PAPER Thu-3-3-1 — Distributed Summation Privacy for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distributed Summation Privacy for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-1|PAPER Mon-1-8-1 — Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-4|PAPER Thu-1-4-4 — A Noise Robust Technique for Detecting Vowels in Speech Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise Robust Technique for Detecting Vowels in Speech Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-12|PAPER Wed-SS-1-6-12 — Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-9|PAPER Wed-3-1-9 — Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2073.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-4|PAPER Wed-1-1-4 — Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-7|PAPER Wed-1-7-7 — Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2868.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-8|PAPER Wed-1-7-8 — Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-4|PAPER Wed-3-5-4 — Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1958.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-1|PAPER Tue-1-8-1 — Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2359.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-5|PAPER Wed-3-12-5 — TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2636.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-1|PAPER Wed-1-9-1 — An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-7|PAPER Mon-1-4-7 — Unsupervised Methods for Evaluating Speech Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Evaluating Speech Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-4|PAPER Wed-1-11-4 — Constrained Ratio Mask for Speech Enhancement Using DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Ratio Mask for Speech Enhancement Using DNN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-8|PAPER Wed-2-5-8 — Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2500.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-2|PAPER Thu-2-1-2 — Adversarial Dictionary Learning for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Dictionary Learning for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-9|PAPER Wed-2-2-9 — End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-5|PAPER Mon-1-2-5 — Differential Beamforming for Uniform Circular Array with Directional Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Differential Beamforming for Uniform Circular Array with Directional Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-5|PAPER Mon-3-4-5 — Acoustic Scene Analysis with Multi-Head Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Analysis with Multi-Head Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2572.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-3|PAPER Thu-2-2-3 — Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-5|PAPER Mon-3-4-5 — Acoustic Scene Analysis with Multi-Head Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Analysis with Multi-Head Attention Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-1|PAPER Wed-2-8-1 — Semi-Supervised ASR by End-to-End Self-Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised ASR by End-to-End Self-Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1287.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-1|PAPER Wed-3-5-1 — Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-7-4|PAPER Thu-2-7-4 — Strategies for End-to-End Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strategies for End-to-End Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1192.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-2|PAPER Mon-2-5-2 — CAM: Uninteresting Speech Detector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAM: Uninteresting Speech Detector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">New Advances in Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-6|PAPER Tue-1-4-6 — A Perceptual Study of the Five Level Tones in Hmu (Xinzhai Variety)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptual Study of the Five Level Tones in Hmu (Xinzhai Variety)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-2|PAPER Thu-3-10-2 — Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-2|PAPER Wed-SS-2-3-2 — Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2872.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-8|PAPER Thu-1-7-8 — Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2695.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-1|PAPER Wed-1-1-1 — Interaction of Tone and Voicing in Mizo]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interaction of Tone and Voicing in Mizo</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-9|PAPER Thu-2-10-9 — Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-8|PAPER Thu-2-3-8 — Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-1|PAPER Mon-2-8-1 — Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-2|PAPER Mon-2-8-2 — Environmental Sound Classification with Parallel Temporal-Spectral Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environmental Sound Classification with Parallel Temporal-Spectral Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-2|PAPER Thu-3-10-2 — Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2479.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-9|PAPER Thu-1-10-9 — Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2675.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-6|PAPER Thu-1-2-6 — Early Stage LM Integration Using Local and Global Log-Linear Combination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Stage LM Integration Using Local and Global Log-Linear Combination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-8|PAPER Tue-1-4-8 — Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-4|PAPER Thu-3-11-4 — Online Blind Reverberation Time Estimation Using CRNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Blind Reverberation Time Estimation Using CRNNs</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2171.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-5|PAPER Thu-3-11-5 — Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1246.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-7|PAPER Mon-2-9-7 — Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-10|PAPER Mon-1-1-10 — Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2183.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|^<div class="cpauthorindexpersoncardpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-2|PAPER Mon-2-10-2 — Densely Connected Time Delay Neural Network for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Densely Connected Time Delay Neural Network for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2359.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-5|PAPER Wed-3-12-5 — TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2408.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-7|PAPER Wed-3-8-7 — A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2549.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-10|PAPER Thu-2-3-10 — F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1463.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-6|PAPER Thu-1-1-6 — An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2359.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-5|PAPER Wed-3-12-5 — TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2572.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-3|PAPER Thu-2-2-3 — Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-8|PAPER Wed-1-10-8 — Perception and Production of Mandarin Initial Stops by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception and Production of Mandarin Initial Stops by Native Urdu Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-6|PAPER Wed-2-11-6 — Tone Learning in Low-Resource Bilingual TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Learning in Low-Resource Bilingual TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-2|PAPER Thu-3-10-2 — Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-1|PAPER Wed-2-2-1 — Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-10|PAPER Wed-1-2-10 — Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2091.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-1|PAPER Wed-SS-1-12-1 — Online Monaural Speech Enhancement Using Delayed Subband LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Monaural Speech Enhancement Using Delayed Subband LSTM</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2572.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-3|PAPER Thu-2-2-3 — Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-1|PAPER Thu-SS-2-5-1 — The Attacker’s Perspective on Automatic Speaker Verification: An Overview]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Attacker’s Perspective on Automatic Speaker Verification: An Overview</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1820.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-6|PAPER Thu-2-4-6 — Dimensional Emotion Prediction Based on Interactive Context in Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensional Emotion Prediction Based on Interactive Context in Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4012.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-7|PAPER Mon-2-12-7 — A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-1|PAPER Wed-3-1-1 — Automatic Scoring at Multi-Granularity for L2 Pronunciation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Scoring at Multi-Granularity for L2 Pronunciation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1284.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-2|PAPER Thu-2-9-2 — Joint Detection of Sentence Stress and Phrase Boundary for Prosody]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Detection of Sentence Stress and Phrase Boundary for Prosody</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1220.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-2|PAPER Thu-3-11-2 — Spatial Resolution of Early Reflection for Speech and White Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Resolution of Early Reflection for Speech and White Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-10|PAPER Wed-1-2-10 — Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-10|PAPER Mon-2-10-10 — Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-3|PAPER Wed-3-2-3 — Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-10|PAPER Wed-1-2-10 — Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Introducing the VoicePrivacy Initiative</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-1|PAPER Wed-1-3-1 — Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-7|PAPER Thu-1-1-7 — Reverberation Modeling for Source-Filter-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverberation Modeling for Source-Filter-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2102.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-8|PAPER Tue-1-5-8 — FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1766.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-3|PAPER Thu-1-8-3 — Targeted Content Feedback in Spoken Language Learning and Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Targeted Content Feedback in Spoken Language Learning and Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1341.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-8|PAPER Thu-1-9-8 — Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-3|PAPER Thu-2-8-3 — Class LM and Word Mapping for Contextual Biasing in End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class LM and Word Mapping for Contextual Biasing in End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-12-7|PAPER Mon-1-12-7 — VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-9|PAPER Mon-3-1-9 — A 43 Language Multilingual Punctuation Prediction Neural Network Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A 43 Language Multilingual Punctuation Prediction Neural Network Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1969.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-4|PAPER Wed-3-2-4 — Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2487.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-6|PAPER Wed-2-10-6 — LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-2|PAPER Tue-1-7-2 — A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-5|PAPER Thu-1-8-5 — Ensemble Approaches for Uncertainty in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Approaches for Uncertainty in Spoken Language Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2809.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-8|PAPER Thu-3-2-8 — A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Look Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1619.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-8|PAPER Thu-1-3-8 — Insertion-Based Modeling for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Insertion-Based Modeling for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-5|PAPER Mon-1-9-5 — A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-7|PAPER Wed-2-2-7 — Multi-Scale Convolution for Robust Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Convolution for Robust Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-6|PAPER Wed-2-11-6 — Tone Learning in Low-Resource Bilingual TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Learning in Low-Resource Bilingual TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-3|PAPER Wed-3-5-3 — A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1220.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-2|PAPER Thu-3-11-2 — Spatial Resolution of Early Reflection for Speech and White Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Resolution of Early Reflection for Speech and White Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1504.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-2|PAPER Mon-1-8-2 — Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1761.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-6|PAPER Wed-2-2-6 — Deep Template Matching for Small-Footprint and Configurable Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Template Matching for Small-Footprint and Configurable Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-2|PAPER Wed-SS-3-11-2 — Deep Embedding Learning for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Embedding Learning for Text-Dependent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-3|PAPER Thu-1-4-3 — Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1260.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-4|PAPER Thu-1-10-4 — A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-7|PAPER Thu-3-2-7 — Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-3-2|PAPER Wed-SS-2-3-2 — Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DiPCo — Dinner Party Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Training for End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-2|PAPER Mon-2-10-2 — Densely Connected Time Delay Neural Network for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Densely Connected Time Delay Neural Network for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2985.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-12|PAPER Thu-2-9-12 — Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-8|PAPER Tue-1-4-8 — Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-4|PAPER Wed-2-2-4 — Domain Aware Training for Far-Field Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Aware Training for Far-Field Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2952.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-1|PAPER Wed-3-7-1 — Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2472.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-5|PAPER Thu-2-2-5 — Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-1|PAPER Mon-1-5-1 — Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-7|PAPER Thu-1-1-7 — Reverberation Modeling for Source-Filter-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverberation Modeling for Source-Filter-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-1|PAPER Wed-2-8-1 — Semi-Supervised ASR by End-to-End Self-Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised ASR by End-to-End Self-Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1463.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-6|PAPER Thu-1-1-6 — An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-6|PAPER Tue-1-9-6 — Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-9|PAPER Wed-3-1-9 — Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-4|PAPER Mon-2-4-4 — Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2507.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-2|PAPER Wed-2-1-2 — Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1878.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-4|PAPER Wed-1-2-4 — Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1416.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-4|PAPER Mon-2-7-4 — TTS Skins: Speaker Conversion via ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TTS Skins: Speaker Conversion via ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-7|PAPER Mon-2-7-7 — Unsupervised Cross-Domain Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Cross-Domain Singing Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4012.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-7|PAPER Mon-2-12-7 — A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-1|PAPER Mon-1-8-1 — Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2028.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-5|PAPER Mon-3-11-5 — Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-6|PAPER Wed-1-7-6 — Multi-Modality Matters: A Performance Leap on VoxCeleb]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modality Matters: A Performance Leap on VoxCeleb</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-9|PAPER Wed-2-12-9 — Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-6|PAPER Mon-3-4-6 — Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2298.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-10|PAPER Tue-1-3-10 — Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-1|PAPER Thu-1-5-1 — Discovering Articulatory Speech Targets from Synthesized Random Babble]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discovering Articulatory Speech Targets from Synthesized Random Babble</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2786.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-10|PAPER Mon-1-5-10 — Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2793.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-7|PAPER Mon-3-9-7 — Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2786.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-10|PAPER Mon-1-5-10 — Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2793.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-7|PAPER Mon-3-9-7 — Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1772.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-8|PAPER Tue-1-2-8 — Speaker Re-Identification with Speaker Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Re-Identification with Speaker Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1774.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-4|PAPER Wed-2-12-4 — Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-9|PAPER Thu-2-3-9 — Tone Variations in Regionally Accented Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tone Variations in Regionally Accented Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-7|PAPER Thu-1-9-7 — Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1614.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-2|PAPER Wed-1-1-2 — Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-9|PAPER Thu-3-5-9 — Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Federated Approach in Training Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-10|PAPER Mon-1-4-10 — Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2445.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-9|PAPER Mon-3-11-9 — Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2267.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-4|PAPER Thu-3-6-4 — Dysarthric Speech Recognition Based on Deep Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthric Speech Recognition Based on Deep Metric Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual RNN-T for Open Domain ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4012.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-7|PAPER Mon-2-12-7 — A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Focal Loss for Punctuation Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-1|PAPER Thu-1-4-1 — Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2440.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-10|PAPER Mon-3-1-10 — Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-5|PAPER Tue-1-8-5 — Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1294.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-3|PAPER Wed-1-8-3 — Adversarial Audio: A New Information Hiding Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Audio: A New Information Hiding Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-6|PAPER Tue-1-9-6 — Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-4|PAPER Thu-3-4-4 — VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-5|PAPER Tue-1-7-5 — Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1498.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-8|PAPER Wed-3-3-8 — Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">THUEE System for NIST SRE19 CTS Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1678.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-2|PAPER Wed-1-10-2 — The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-1|PAPER Wed-2-4-1 — Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2487.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-6|PAPER Wed-2-10-6 — LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1481.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-4|PAPER Wed-2-1-4 — Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1192.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-2|PAPER Mon-2-5-2 — CAM: Uninteresting Speech Detector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAM: Uninteresting Speech Detector</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-4|PAPER Mon-3-10-4 — Coarticulation as Synchronised Sequential Target Approximation: An EMA Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coarticulation as Synchronised Sequential Target Approximation: An EMA Study</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2482.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-6|PAPER Thu-2-6-6 — End-to-End Named Entity Recognition from English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Named Entity Recognition from English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1315.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-8|PAPER Tue-1-8-8 — DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1984.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-5|PAPER Mon-3-8-5 — Speech-to-Singing Conversion Based on Boundary Equilibrium GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-to-Singing Conversion Based on Boundary Equilibrium GAN</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-2|PAPER Wed-1-3-2 — Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1700.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-3|PAPER Wed-2-12-3 — Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2904.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-9|PAPER Tue-1-5-9 — Bandpass Noise Generation and Augmentation for Unified ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandpass Noise Generation and Augmentation for Unified ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-1|PAPER Wed-1-5-1 — 1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-10|PAPER Thu-3-10-10 — Exploring Transformers for Large-Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Transformers for Large-Scale Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2549.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-10|PAPER Thu-2-3-10 — F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2967.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-5|PAPER Tue-1-3-5 — An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|^<div class="cpauthorindexpersoncardpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-5|PAPER Mon-2-2-5 — PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-2|PAPER Wed-1-3-2 — Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2595.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-3|PAPER Wed-1-10-3 — The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2408.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-7|PAPER Wed-3-8-7 — A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-8|PAPER Wed-3-10-8 — Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2408.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-7|PAPER Wed-3-8-7 — A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-9|PAPER Mon-3-2-9 — Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2350.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-8|PAPER Mon-2-4-8 — Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Speech Transformer</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1852.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-1|PAPER Thu-1-8-1 — Spoken Language ‘Grammatical Error Correction’]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language ‘Grammatical Error Correction’</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-5|PAPER Mon-2-2-5 — PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1341.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-8|PAPER Thu-1-9-8 — Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1120.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-10-4|PAPER Thu-2-10-4 — Perception of English Fricatives and Affricates by Advanced Chinese Learners of English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of English Fricatives and Affricates by Advanced Chinese Learners of English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2983.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-6|PAPER Tue-1-1-6 — Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2487.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-6|PAPER Wed-2-10-6 — LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-11-7|PAPER Thu-3-11-7 — Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2075.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-5|PAPER Wed-3-5-5 — Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2312.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-6-2|PAPER Wed-2-6-2 — Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-6|PAPER Wed-1-11-6 — Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1552.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-2|PAPER Wed-SS-1-4-2 — Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2477.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-11|PAPER Wed-3-10-11 — Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-5|PAPER Wed-1-9-5 — Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-7|PAPER Mon-2-7-7 — Unsupervised Cross-Domain Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Cross-Domain Singing Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-8|PAPER Wed-3-7-8 — Real Time Speech Enhancement in the Waveform Domain]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Speech Enhancement in the Waveform Domain</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2398.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-8|PAPER Thu-1-4-8 — Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2271.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-5|PAPER Wed-1-10-5 — Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-4|PAPER Thu-1-9-4 — End-to-End Task-Oriented Dialog System Through Template Slot Value Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Task-Oriented Dialog System Through Template Slot Value Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1420.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-4|PAPER Mon-2-10-4 — Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-5|PAPER Tue-1-7-5 — Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-2|PAPER Thu-1-4-2 — Dual Attention in Time and Frequency Domain for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Attention in Time and Frequency Domain for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2842.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-7|PAPER Thu-1-8-7 — ASR-Based Evaluation and Feedback for Individualized Reading Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Based Evaluation and Feedback for Individualized Reading Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1908.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-5|PAPER Mon-1-7-5 — Self-Attentive Similarity Measurement Strategies in Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attentive Similarity Measurement Strategies in Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-8|PAPER Mon-1-2-8 — Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-7|PAPER Thu-1-9-7 — Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-2|PAPER Thu-3-10-2 — Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1617.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-3|PAPER Mon-3-3-3 — Lite Audio-Visual Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lite Audio-Visual Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-9-2|PAPER Mon-3-9-2 — iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1852.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-1|PAPER Thu-1-8-1 — Spoken Language ‘Grammatical Error Correction’]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language ‘Grammatical Error Correction’</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2967.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-5|PAPER Tue-1-3-5 — An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-2|PAPER Thu-SS-1-6-2 — Vector-Quantized Autoregressive Predictive Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vector-Quantized Autoregressive Predictive Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-3-2|PAPER Wed-1-3-2 — Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-4|PAPER Thu-1-3-4 — Stochastic Convolutional Recurrent Networks for Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Stochastic Convolutional Recurrent Networks for Language Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1824.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-7|PAPER Wed-3-4-7 — Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1806.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-4|PAPER Mon-3-5-4 — Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-4|PAPER Wed-2-2-4 — Domain Aware Training for Far-Field Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Aware Training for Far-Field Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-1|PAPER Wed-2-5-1 — Adversarial Latent Representation Learning for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Latent Representation Learning for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1436.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-2|PAPER Mon-3-11-2 — Atss-Net: Target Speaker Separation via Attention-Based Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Atss-Net: Target Speaker Separation via Attention-Based Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2427.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-7-7|PAPER Tue-1-7-7 — Distant Supervision for Polyphone Disambiguation in Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Distant Supervision for Polyphone Disambiguation in Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — Improving X-Vector and PLDA for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving X-Vector and PLDA for Text-Dependent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1403.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-1|PAPER Wed-3-4-1 — Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2453.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-4-7|PAPER Mon-2-4-7 — //Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">//Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-5|PAPER Thu-SS-2-5-5 — x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-6|PAPER Thu-SS-2-5-6 — Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-1|PAPER Mon-2-8-1 — Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-8-2|PAPER Mon-2-8-2 — Environmental Sound Classification with Parallel Temporal-Spectral Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environmental Sound Classification with Parallel Temporal-Spectral Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-8|PAPER Wed-3-5-8 — Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2538.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-5|PAPER Thu-1-7-5 — Angular Margin Centroid Loss for Text-Independent Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Angular Margin Centroid Loss for Text-Independent Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-11-4|PAPER Wed-1-11-4 — Constrained Ratio Mask for Speech Enhancement Using DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Ratio Mask for Speech Enhancement Using DNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-3-4|PAPER Wed-3-3-4 — Gaming Corpus for Studying Social Screams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gaming Corpus for Studying Social Screams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-6|PAPER Mon-2-12-6 — Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2267.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-4|PAPER Thu-3-6-4 — Dysarthric Speech Recognition Based on Deep Metric Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dysarthric Speech Recognition Based on Deep Metric Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2477.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-11|PAPER Wed-3-10-11 — Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3166.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-3-2|PAPER Thu-2-3-2 — Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2445.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-9|PAPER Mon-3-11-9 — Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2566.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-9|PAPER Thu-2-9-9 — Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1306.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-3|PAPER Mon-2-10-3 — Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Training for End-to-End Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1733.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-3|PAPER Mon-2-1-3 — Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-9|PAPER Mon-2-1-9 — Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Separation Network for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1570.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-4-1|PAPER Thu-2-4-1 — SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1563.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-3-2|PAPER Mon-3-3-2 — SEANet: A Multi-Modal Speech Enhancement Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SEANet: A Multi-Modal Speech Enhancement Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2500.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-1-2|PAPER Thu-2-1-2 — Adversarial Dictionary Learning for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Dictionary Learning for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Metadata-Aware End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-6|PAPER Mon-2-9-6 — Improving End-to-End Speech-to-Intent Classification with Reptile]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving End-to-End Speech-to-Intent Classification with Reptile</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2828.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-1-6|PAPER Mon-3-1-6 — Multilingual Jointly Trained Acoustic and Written Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Jointly Trained Acoustic and Written Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-9|PAPER Mon-1-8-9 — Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1642.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-4|PAPER Mon-1-5-4 — Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-2|PAPER Thu-1-11-2 — Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-1|PAPER Wed-3-8-1 — Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1619.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-3-8|PAPER Thu-1-3-8 — Insertion-Based Modeling for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Insertion-Based Modeling for End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-14|PAPER Wed-SS-1-6-14 — Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-9|PAPER Thu-3-9-9 — Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1395.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-5-2|PAPER Wed-3-5-2 — How Does Label Noise Affect the Quality of Speaker Embeddings?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Does Label Noise Affect the Quality of Speaker Embeddings?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-1|PAPER Thu-1-11-1 — From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-3|PAPER Mon-1-9-3 — Multi-Modal Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Controllable Neural Prosody Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2143.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-3|PAPER Thu-2-11-3 — HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-9|PAPER Wed-2-2-9 — End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3146.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-12-9|PAPER Wed-3-12-9 — Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1869.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-1-1|PAPER Mon-2-1-1 — Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-2-5|PAPER Wed-3-2-5 — Towards Speech Robustness for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Speech Robustness for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2531.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-1|PAPER Thu-3-1-1 — Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Architecture Search on Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-9-10|PAPER Tue-1-9-10 — How Ordinal Are Your Data?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How Ordinal Are Your Data?</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-3|PAPER Mon-1-9-3 — Multi-Modal Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-2|PAPER Wed-2-11-2 — Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1338.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-3|PAPER Mon-2-3-3 — Statistical Testing on ASR Performance via Blockwise Bootstrap]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical Testing on ASR Performance via Blockwise Bootstrap</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-4|PAPER Mon-1-3-4 — Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-9-8|PAPER Thu-2-9-8 — Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-1|PAPER Mon-1-5-1 — Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-1|PAPER Mon-2-7-1 — Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-7|PAPER Thu-1-1-7 — Reverberation Modeling for Source-Filter-Based Neural Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverberation Modeling for Source-Filter-Based Neural Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-9|PAPER Mon-3-2-9 — Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1391.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-2-1|PAPER Thu-2-2-1 — Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-1-8|PAPER Thu-3-1-8 — Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Focal Loss for Punctuation Prediction</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-6|PAPER Wed-1-7-6 — Multi-Modality Matters: A Performance Leap on VoxCeleb]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modality Matters: A Performance Leap on VoxCeleb</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-12-9|PAPER Wed-2-12-9 — Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-9-5|PAPER Thu-1-9-5 — Task-Oriented Dialog Generation with Enhanced Entity Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Task-Oriented Dialog Generation with Enhanced Entity Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1710.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-5|PAPER Mon-2-7-5 — GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-4|PAPER Mon-3-11-4 — X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1257.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-1-5|PAPER Wed-1-1-5 — Neutral Tone in Changde Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neutral Tone in Changde Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-7-7|PAPER Wed-1-7-7 — Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1810.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-2-6|PAPER Mon-3-2-6 — Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-5|PAPER Wed-1-9-5 — Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2471.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-2|PAPER Mon-1-1-2 — SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1504.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-2|PAPER Mon-1-8-2 — Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-3|PAPER Wed-3-7-3 — Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2732.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-6|PAPER Mon-2-2-6 — CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-5-11|PAPER Mon-1-5-11 — Neural Homomorphic Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Homomorphic Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR-Free Pronunciation Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-10-8|PAPER Wed-1-10-8 — Perception and Production of Mandarin Initial Stops by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception and Production of Mandarin Initial Stops by Native Urdu Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1089.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-8|PAPER Mon-1-8-8 — Neural Speech Separation Using Spatially Distributed Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Speech Separation Using Spatially Distributed Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1481.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-1-4|PAPER Wed-2-1-4 — Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — Improving X-Vector and PLDA for Text-Dependent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving X-Vector and PLDA for Text-Dependent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2729.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-6-8|PAPER Wed-SS-1-6-8 — Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1710.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-7-5|PAPER Mon-2-7-5 — GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-11-4|PAPER Mon-3-11-4 — X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1151.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-3|PAPER Mon-3-4-3 — ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-2-5-5|PAPER Wed-2-5-5 — Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-10-4|PAPER Mon-3-10-4 — Coarticulation as Synchronised Sequential Target Approximation: An EMA Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coarticulation as Synchronised Sequential Target Approximation: An EMA Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-12-8|PAPER Mon-2-12-8 — End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-6|PAPER Thu-SS-2-5-6 — Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-1-8-9|PAPER Wed-1-8-9 — Evaluating Automatically Generated Phoneme Captions for Images]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Automatically Generated Phoneme Captions for Images</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-2-3|PAPER Mon-2-2-3 — Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-3|PAPER Tue-1-5-3 — A Deep 2D Convolutional Network for Waveform-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep 2D Convolutional Network for Waveform-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1606.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-1-8-7|PAPER Mon-1-8-7 — Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-3-4-10|PAPER Mon-3-4-10 — Deep Learning Based Open Set Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Open Set Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpconfinfotable|k
|^<a href="./IS2020/HTML/WELCOMEISCA.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^Welcome from the ISCA President |
|^<a href="./IS2020/HTML/WELCOMECHAIRS.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^Welcome from the Conference Chairs |
|^<a href="./IS2020/HTML/WELCOMETPC.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^Welcome from the TPC Chairs |
|^<a href="./IS2020/HTML/COMMITTEE.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^INTERSPEECH 2020 Organizing Committee |
|^<a href="./IS2020/HTML/SPONSORS.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^Sponsors |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}}
</p></div>
<div class="cpcopyrightpage">{{$:/causal/publication/Copyright Statement}}</div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinyu Li|AUTHOR Jinyu Li]]^^1^^, [[Yu Wu|AUTHOR Yu Wu]]^^2^^, [[Yashesh Gaur|AUTHOR Yashesh Gaur]]^^1^^, [[Chengyi Wang|AUTHOR Chengyi Wang]]^^2^^, [[Rui Zhao|AUTHOR Rui Zhao]]^^1^^, [[Shujie Liu|AUTHOR Shujie Liu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Microsoft, USA; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1–5&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, there has been a strong push to transition from hybrid models to end-to-end (E2E) models for automatic speech recognition. Currently, there are three promising E2E methods: recurrent neural network transducer (RNN-T), RNN attention-based encoder-decoder (AED), and Transformer-AED. In this study, we conduct an empirical comparison of RNN-T, RNN-AED, and Transformer-AED models, in both non-streaming and streaming modes. We use 65 thousand hours of Microsoft anonymized training data to train these models. As E2E models are more data hungry, it is better to compare their effectiveness with large amount of training data. To the best of our knowledge, no such comprehensive study has been conducted yet. We show that although AED models are stronger than RNN-T in the non-streaming mode, RNN-T is very competitive in streaming mode if its encoder can be properly initialized. Among all three E2E models, transformer-AED achieved the best accuracy in both streaming and non-streaming mode. We show that both streaming RNN-T and transformer-AED models can obtain better accuracy than a highly-optimized hybrid model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinhwan Park|AUTHOR Jinhwan Park]], [[Wonyong Sung|AUTHOR Wonyong Sung]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 46–50&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based models with convolutional encoders enable faster training and inference than recurrent neural network-based ones. However, convolutional models often require a very large receptive field to achieve high recognition accuracy, which not only increases the parameter size but also the computational cost and run-time memory footprint. A convolutional encoder with a short receptive field length can suffer from looping or skipping problems when the input utterance contains the same words as nearby sentences. We believe that this is due to the insufficient receptive field length, and try to remedy this problem by adding positional information to the convolution-based encoder. It is shown that the word error rate (WER) of a convolutional encoder with a short receptive field size can be reduced significantly by augmenting it with positional information. Visualization results are presented to demonstrate the effectiveness of adding positional information. The proposed method improves the accuracy of attention models with a convolutional encoder and achieves a WER of 10.60% on TED-LIUMv2 for an end-to-end speech recognition task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhifu Gao|AUTHOR Zhifu Gao]]^^1^^, [[Shiliang Zhang|AUTHOR Shiliang Zhang]]^^1^^, [[Ming Lei|AUTHOR Ming Lei]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, China; ^^2^^SIT, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 6–10&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speech recognition has become popular in recent years, since it can integrate the acoustic, pronunciation and language models into a single neural network. Among end-to-end approaches, attention-based methods have emerged as being superior. For example, //Transformer//, which adopts an encoder-decoder architecture. The key improvement introduced by Transformer is the utilization of self-attention instead of recurrent mechanisms, enabling both encoder and decoder to capture long-range dependencies with lower computational complexity. In this work, we propose boosting the self-attention ability with a DFSMN memory block, forming the proposed memory equipped self-attention (SAN-M) mechanism. Theoretical and empirical comparisons have been made to demonstrate the relevancy and complementarity between self-attention and the DFSMN memory block. Furthermore, the proposed SAN-M provides an efficient mechanism to integrate these two modules. We have evaluated our approach on the public AISHELL-1 benchmark and an industrial-level 20,000-hour Mandarin speech recognition task. On both tasks, SAN-M systems achieved much better performance than the self-attention based //Transformer// baseline system. Specially, it can achieve a CER of 6.46% on the AISHELL-1 task even without using any external LM, comfortably outperforming other state-of-the-art systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mahaveer Jain|AUTHOR Mahaveer Jain]]^^1^^, [[Gil Keren|AUTHOR Gil Keren]]^^1^^, [[Jay Mahadeokar|AUTHOR Jay Mahadeokar]]^^1^^, [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]]^^1^^, [[Florian Metze|AUTHOR Florian Metze]]^^2^^, [[Yatharth Saraf|AUTHOR Yatharth Saraf]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Facebook, USA; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 11–15&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end (E2E) systems for automatic speech recognition (ASR), such as RNN Transducer (RNN-T) and Listen-Attend-Spell (LAS) blend the individual components of a traditional hybrid ASR system — acoustic model, language model, pronunciation model — into a single neural network. While this has some nice advantages, it limits the system to be trained using only paired audio and text. Because of this, E2E models tend to have difficulties with correctly recognizing rare words that are not frequently seen during training, such as entity names. In this paper, we propose modifications to the RNN-T model that allow the model to utilize additional metadata text with the objective of improving performance on these named entity words. We evaluate our approach on an in-house dataset sampled from de-identified public social media videos, which represent an open domain ASR task. By using an attention model to leverage the contextual metadata that accompanies a video, we observe a relative improvement of about 16% in Word Error Rate on Named Entities (WER-NE) for videos with related metadata.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing Pan|AUTHOR Jing Pan]], [[Joshua Shapiro|AUTHOR Joshua Shapiro]], [[Jeremy Wohlwend|AUTHOR Jeremy Wohlwend]], [[Kyu J. Han|AUTHOR Kyu J. Han]], [[Tao Lei|AUTHOR Tao Lei]], [[Tao Ma|AUTHOR Tao Ma]]
</p><p class="cpabstractcardaffiliationlist">ASAPP, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 16–20&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present state-of-the-art (SOTA) performance on the LibriSpeech corpus with two novel neural network architectures, a //multistream CNN// for acoustic modeling and a //self-attentive simple recurrent unit// (SRU) for language modeling. In the hybrid ASR framework, the multistream CNN acoustic model processes an input of speech frames in multiple parallel pipelines where each stream has a unique dilation rate for diversity. Trained with the SpecAugment data augmentation method, it achieves relative word error rate (WER) improvements of 4% on test-clean and 14% on test-other. We further improve the performance via N-best rescoring using a 24-layer self-attentive SRU language model, achieving WERs of 1.75% on test-clean and 4.46% on test-other.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Deepak Kadetotad|AUTHOR Deepak Kadetotad]], [[Jian Meng|AUTHOR Jian Meng]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Chaitali Chakrabarti|AUTHOR Chaitali Chakrabarti]], [[Jae-sun Seo|AUTHOR Jae-sun Seo]]
</p><p class="cpabstractcardaffiliationlist">Arizona State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 21–25&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The long short-term memory (LSTM) network is one of the most widely used recurrent neural networks (RNNs) for automatic speech recognition (ASR), but is parametrized by millions of parameters. This makes it prohibitive for memory-constrained hardware accelerators as the storage demand causes higher dependence on off-chip memory, which bottlenecks latency and power. In this paper, we propose a new LSTM training technique based on hierarchical coarse-grain sparsity (HCGS), which enforces hierarchical structured sparsity by randomly dropping static block-wise connections between layers. HCGS maintains the same hierarchical structured sparsity throughout training and inference; this reduces weight storage for both training and inference hardware systems. We also jointly optimize in-training quantization with HCGS on 2-/3-layer LSTM networks for the TIMIT and TED-LIUM corpora. With 16× structured compression and 6-bit weight precision, we achieved a phoneme error rate (PER) of 16.9% for TIMIT and a word error rate (WER) of 18.9% for TED-LIUM, showing the best trade-off between error rate and LSTM memory compression compared to prior works.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Timo Lohrenz|AUTHOR Timo Lohrenz]], [[Tim Fingscheidt|AUTHOR Tim Fingscheidt]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Braunschweig, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 26–30&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Optimal fusion of streams for ASR is a nontrivial problem. Recently, so-called posterior-in-posterior-out (PIPO-)BLSTMs have been proposed that serve as state sequence enhancers and have highly attractive training properties. In this work, we adopt the PIPO-BLSTMs and employ them in the context of stream fusion for ASR. Our contributions are the following: First, we show the positive effect of a PIPO-BLSTM as state sequence enhancer for various stream fusion approaches. Second, we confirm the advantageous context-free (CF) training property of the PIPO-BLSTM for all investigated fusion approaches. Third, we show with a fusion example of two streams, stemming from different short-time Fourier transform window lengths, that all investigated fusion approaches take profit. Finally, the turbo fusion approach turns out to be best, employing a CF-type PIPO-BLSTM with a novel iterative augmentation in training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]]^^1^^, [[Thanh-Le Ha|AUTHOR Thanh-Le Ha]]^^1^^, [[Tuan-Nam Nguyen|AUTHOR Tuan-Nam Nguyen]]^^1^^, [[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]]^^1^^, [[Elizabeth Salesky|AUTHOR Elizabeth Salesky]]^^2^^, [[Sebastian Stüker|AUTHOR Sebastian Stüker]]^^1^^, [[Jan Niehues|AUTHOR Jan Niehues]]^^3^^, [[Alex Waibel|AUTHOR Alex Waibel]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^KIT, Germany; ^^2^^Johns Hopkins University, USA; ^^3^^Universiteit Maastricht, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 31–35&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer models are powerful sequence-to-sequence architectures that are capable of directly mapping speech inputs to transcriptions or translations. However, the mechanism for modeling positions in this model was tailored for text modeling, and thus is less ideal for acoustic inputs. In this work, we adapt the relative position encoding scheme to the Speech Transformer, where the key addition is relative distance between input states in the self-attention network. As a result, the network can better adapt to the variable distributions present in speech data. Our experiments show that our resulting model achieves the best recognition result on the Switchboard benchmark in the non-augmentation condition, and the best published result in the MuST-C speech translation benchmark. We also show that this model is able to better utilize synthetic data than the Transformer, and adapts better to variable sentence segmentation quality for speech translation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Tianyan Zhou|AUTHOR Tianyan Zhou]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 36–40&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose an end-to-end speaker-attributed automatic speech recognition model that unifies speaker counting, speech recognition, and speaker identification on monaural overlapped speech. Our model is built on serialized output training (SOT) with attention-based encoder-decoder, a recently proposed method for recognizing overlapped speech comprising an arbitrary number of speakers. We extend SOT by introducing a speaker inventory as an auxiliary input to produce speaker labels as well as multi-speaker transcriptions. All model parameters are optimized by speaker-attributed maximum mutual information criterion, which represents a joint probability for overlapped speech recognition and speaker identification. Experiments on LibriSpeech corpus show that our proposed method achieves significantly better speaker-attributed word error rate than the baseline that separately performs overlapped speech recognition and speaker identification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takashi Fukuda|AUTHOR Takashi Fukuda]]^^1^^, [[Samuel Thomas|AUTHOR Samuel Thomas]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, Japan; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 41–45&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a novel generalized knowledge distillation framework, with an implicit transfer of privileged information. In our proposed framework, teacher networks are trained with two input branches on pairs of time-synchronous lossless and lossy acoustic features. While one branch of the teacher network processes a privileged view of the data using lossless features, the second branch models a student view, by processing lossy features corresponding to the same data. During the training step, weights of this teacher network are updated using a composite two-part cross entropy loss. The first part of this loss is computed between the predicted output labels of the lossless data and the actual ground truth. The second part of the loss is computed between the predicted output labels of the lossy data and lossless data. In the next step of generating soft labels, only the student view branch of the teacher is used with lossy data. The benefit of this proposed technique is shown on speech signals with long-term time-frequency bandwidth loss due to recording devices and network conditions. Compared to conventional generalized knowledge distillation with privileged information, the proposed method has a relative improvement of 9.5% on both lossless and lossy test sets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bo Yang|AUTHOR Bo Yang]]^^1^^, [[Xianlong Tan|AUTHOR Xianlong Tan]]^^2^^, [[Zhengmao Chen|AUTHOR Zhengmao Chen]]^^1^^, [[Bing Wang|AUTHOR Bing Wang]]^^2^^, [[Min Ruan|AUTHOR Min Ruan]]^^2^^, [[Dan Li|AUTHOR Dan Li]]^^2^^, [[Zhongping Yang|AUTHOR Zhongping Yang]]^^3^^, [[Xiping Wu|AUTHOR Xiping Wu]]^^1^^, [[Yi Lin|AUTHOR Yi Lin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Sichuan University, China; ^^2^^CAAC, China; ^^3^^Wisesoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 399–403&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic Speech Recognition (ASR) technique has been greatly developed in recent years, which expedites many applications in other fields. For the ASR research, speech corpus is always an essential foundation, especially for the vertical industry, such as Air Traffic Control (ATC). There are some speech corpora for common applications, public or paid. However, for the ATC domain, it is difficult to collect raw speeches from real systems due to safety issues. More importantly, annotating the transcription is a more laborious work for the supervised learning ASR task, which hugely restricts the prospect of ASR application. In this paper, a multilingual speech corpus (ATCSpeech) from real ATC systems, including accented Mandarin Chinese and English speeches, is built and released to encourage the non-commercial ASR research in the ATC domain. The corpus is detailly introduced from the perspective of data amount, speaker gender and role, speech quality and other attributions. In addition, the performance of baseline ASR models is also reported. A community edition for our speech database can be applied and used under a special contract. To our best knowledge, this is the first work that aims at building a real and multilingual ASR corpus for the ATC related research. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Kirkedal|AUTHOR Andreas Kirkedal]], [[Marija Stepanović|AUTHOR Marija Stepanović]], [[Barbara Plank|AUTHOR Barbara Plank]]
</p><p class="cpabstractcardaffiliationlist">ITU, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 442–446&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces FT SPEECH, a new speech corpus created from the recorded meetings of the Danish Parliament, otherwise known as the //Folketing// (FT). The corpus contains over 1,800 hours of transcribed speech by a total of 434 speakers. It is significantly larger in duration, vocabulary, and amount of spontaneous speech than the existing public speech corpora for Danish, which are largely limited to read-aloud and dictation data. We outline design considerations, including the preprocessing methods and the alignment procedure. To evaluate the quality of the corpus, we train automatic speech recognition systems (ASR) on the new resource and compare them to the systems trained on the Danish part of Språkbanken, the largest public ASR corpus for Danish to date. Our baseline results show that we achieve a 14.01 WER on the new corpus. A combination of FT SPEECH with in-domain language data provides comparable results to models trained specifically on Språkbanken, showing that FT SPEECH transfers well to this data set. Interestingly, our results demonstrate that the opposite is not the case. This shows that FT SPEECH provides a valuable resource for promoting research on Danish ASR with more spontaneous speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexander Gutkin|AUTHOR Alexander Gutkin]]^^1^^, [[Işın Demirşahin|AUTHOR Işın Demirşahin]]^^1^^, [[Oddur Kjartansson|AUTHOR Oddur Kjartansson]]^^1^^, [[Clara Rivera|AUTHOR Clara Rivera]]^^1^^, [[Kọ́lá Túbọ̀sún|AUTHOR Kọ́lá Túbọ̀sún]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, UK; ^^2^^British Library, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 404–408&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces an open-source speech dataset for Yoruba — one of the largest low-resource West African languages spoken by at least 22 million people. Yoruba is one of the official languages of Nigeria, Benin and Togo, and is spoken in other neighboring African countries and beyond. The corpus consists of over four hours of 48 kHz recordings from 36 male and female volunteers and the corresponding transcriptions that include disfluency annotation. The transcriptions have full diacritization, which is vital for pronunciation and lexical disambiguation. The annotated speech dataset described in this paper is primarily intended for use in text-to-speech systems, serve as adaptation data in automatic speech recognition and speech-to-speech translation, and provide insights in West African corpus linguistics. We demonstrate the use of this corpus in a simple statistical parametric speech synthesis (SPSS) scenario evaluating it against the related languages from the CMU Wilderness dataset and the Yoruba Lagos-NWU corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jung-Woo Ha|AUTHOR Jung-Woo Ha]]^^1^^, [[Kihyun Nam|AUTHOR Kihyun Nam]]^^1^^, [[Jingu Kang|AUTHOR Jingu Kang]]^^1^^, [[Sang-Woo Lee|AUTHOR Sang-Woo Lee]]^^1^^, [[Sohee Yang|AUTHOR Sohee Yang]]^^1^^, [[Hyunhoon Jung|AUTHOR Hyunhoon Jung]]^^1^^, [[Hyeji Kim|AUTHOR Hyeji Kim]]^^1^^, [[Eunmi Kim|AUTHOR Eunmi Kim]]^^1^^, [[Soojin Kim|AUTHOR Soojin Kim]]^^1^^, [[Hyun Ah Kim|AUTHOR Hyun Ah Kim]]^^1^^, [[Kyoungtae Doh|AUTHOR Kyoungtae Doh]]^^1^^, [[Chan Kyu Lee|AUTHOR Chan Kyu Lee]]^^1^^, [[Nako Sung|AUTHOR Nako Sung]]^^1^^, [[Sunghun Kim|AUTHOR Sunghun Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^HKUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 409–413&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) via call is essential for various applications, including AI for contact center (AICC) services. Despite the advancement of ASR, however, most publicly available call-based speech corpora such as Switchboard are old-fashioned. Also, most existing call corpora are in English and mainly focus on open domain dialog or general scenarios such as audiobooks. Here we introduce a new large-scale Korean call-based speech corpus under a goal-oriented dialog scenario from more than 11,000 people, i.e., ClovaCall corpus. ClovaCall includes approximately 60,000 pairs of a short sentence and its corresponding spoken utterance in a restaurant reservation domain. We validate the effectiveness of our dataset with intensive experiments using two standard ASR models. Furthermore, we release our ClovaCall dataset and baseline source codes to be available via https://github.com/ClovaAI/ClovaCall</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanhong Wang|AUTHOR Yanhong Wang]]^^1^^, [[Huan Luan|AUTHOR Huan Luan]]^^1^^, [[Jiahong Yuan|AUTHOR Jiahong Yuan]]^^2^^, [[Bin Wang|AUTHOR Bin Wang]]^^1^^, [[Hui Lin|AUTHOR Hui Lin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LAIX, China; ^^2^^Baidu, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 414–418&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a corpus of Chinese Learner English containing 82 hours of L2 English speech by Chinese learners from all major dialect regions, collected through mobile apps developed by LAIX Inc. The LAIX corpus was created to serve as a benchmark dataset for evaluating Automatic Speech Recognition (ASR) performance on L2 English, the first of this kind as far as we know. The paper describes our effort to build the corpus, including corpus design, data selection and transcription. Multiple rounds of quality check were conducted in the transcription process. Transcription errors were analyzed in terms of error types, rounds of reviewing, and learners’ proficiency levels. Word error rates of state-of-the-art ASR systems on the benchmark corpus were also reported.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vikram Ramanarayanan|AUTHOR Vikram Ramanarayanan]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 419–423&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a carefully designed corpus of scored spoken conversations between English language learners and a dialog system to facilitate research and development of both human and machine scoring of dialog interactions. We collected speech, demographic and user experience data from non-native speakers of English who interacted with a virtual boss as part of a workplace pragmatics skill building application. Expert raters then scored the dialogs on a custom rubric encompassing 12 aspects of conversational proficiency as well as an overall holistic performance score. We analyze key corpus statistics and discuss the advantages of such a corpus for both human and machine scoring.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Si-Ioi Ng|AUTHOR Si-Ioi Ng]], [[Cymie Wing-Yee Ng|AUTHOR Cymie Wing-Yee Ng]], [[Jiarui Wang|AUTHOR Jiarui Wang]], [[Tan Lee|AUTHOR Tan Lee]], [[Kathy Yuet-Sheung Lee|AUTHOR Kathy Yuet-Sheung Lee]], [[Michael Chi-Fai Tong|AUTHOR Michael Chi-Fai Tong]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 424–428&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the design and development of CUCHILD, a large-scale Cantonese corpus of child speech. The corpus contains spoken words collected from 1,986 child speakers aged from 3 to 6 years old. The speech materials include 130 words of 1 to 4 syllables in length. The speakers cover both typically developing (TD) children and children with speech disorder. The intended use of the corpus is to support scientific and clinical research, as well as technology development related to child speech assessment. The design of the corpus, including selection of words, participants recruitment, data acquisition process, and data pre-processing are described in detail. The results of acoustical analysis are presented to illustrate the properties of child speech. Potential applications of the corpus in automatic speech recognition, phonological error detection and speaker diarization are also discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katri Leino|AUTHOR Katri Leino]]^^1^^, [[Juho Leinonen|AUTHOR Juho Leinonen]]^^1^^, [[Mittul Singh|AUTHOR Mittul Singh]]^^1^^, [[Sami Virpioja|AUTHOR Sami Virpioja]]^^2^^, [[Mikko Kurimo|AUTHOR Mikko Kurimo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^University of Helsinki, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 429–433&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Creating open-domain chatbots requires large amounts of conversational data and related benchmark tasks to evaluate them. Standardized evaluation tasks are crucial for creating automatic evaluation metrics for model development; otherwise, comparing the models would require resource-expensive human evaluation. While chatbot challenges have recently managed to provide a plethora of such resources for English, resources in other languages are not yet available. In this work, we provide a starting point for Finnish open-domain chatbot research. We describe our collection efforts to create the Finnish chat conversation corpus FinChat, which is made available publicly. FinChat includes unscripted conversations on seven topics from people of different ages. Using this corpus, we also construct a retrieval-based evaluation task for Finnish chatbot development. We observe that off-the-shelf chatbot models trained on conversational corpora do not perform better than chance at choosing the right answer based on automatic metrics, while humans can do the same task almost perfectly. Similarly, in a human evaluation, responses to questions from the evaluation set generated by the chatbots are predominantly marked as incoherent. Thus, FinChat provides a challenging evaluation set, meant to encourage chatbot development in Finnish.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maarten Van Segbroeck|AUTHOR Maarten Van Segbroeck]]^^1^^, [[Ahmed Zaid|AUTHOR Ahmed Zaid]]^^1^^, [[Ksenia Kutsenko|AUTHOR Ksenia Kutsenko]]^^1^^, [[Cirenia Huerta|AUTHOR Cirenia Huerta]]^^1^^, [[Tinh Nguyen|AUTHOR Tinh Nguyen]]^^1^^, [[Xuewen Luo|AUTHOR Xuewen Luo]]^^1^^, [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]]^^1^^, [[Jan Trmal|AUTHOR Jan Trmal]]^^2^^, [[Maurizio Omologo|AUTHOR Maurizio Omologo]]^^3^^, [[Roland Maas|AUTHOR Roland Maas]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Johns Hopkins University, USA; ^^3^^FBK, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 434–436&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a speech data corpus that simulates a “dinner party” scenario taking place in an everyday home environment. The corpus was created by recording multiple groups of four Amazon employee volunteers having a natural conversation in English around a dining table. The participants were recorded by a single-channel close-talk microphone and by five far-field 7-microphone array devices positioned at different locations in the recording room. The dataset contains the audio recordings and human labeled transcripts of a total of 10 sessions with a duration between 15 and 45 minutes. The corpus was created to advance in the field of noise robust and distant speech processing and is intended to serve as a public research and benchmarking data set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bo Wang|AUTHOR Bo Wang]]^^1^^, [[Yue Wu|AUTHOR Yue Wu]]^^1^^, [[Niall Taylor|AUTHOR Niall Taylor]]^^1^^, [[Terry Lyons|AUTHOR Terry Lyons]]^^1^^, [[Maria Liakata|AUTHOR Maria Liakata]]^^2^^, [[Alejo J. Nevado-Holgado|AUTHOR Alejo J. Nevado-Holgado]]^^1^^, [[Kate E.A. Saunders|AUTHOR Kate E.A. Saunders]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Oxford, UK; ^^2^^Alan Turing Institute, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 437–441&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Bipolar disorder (BD) and borderline personality disorder (BPD) are both chronic psychiatric disorders. However, their overlapping symptoms and common comorbidity make it challenging for the clinicians to distinguish the two conditions on the basis of a clinical interview. In this work, we first present a new multi-modal dataset containing interviews involving individuals with BD or BPD being interviewed about a non-clinical topic . We investigate the automatic detection of the two conditions, and demonstrate a good linear classifier that can be learnt using a down-selected set of features from the different aspects of the interviews and a novel approach of summarising these features. Finally, we find that different sets of features characterise BD and BPD, thus providing insights into the difference between the automatic screening of the two conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Raphaël Duroselle|AUTHOR Raphaël Duroselle]], [[Denis Jouvet|AUTHOR Denis Jouvet]], [[Irina Illina|AUTHOR Irina Illina]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 447–451&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art language recognition systems are based on discriminative embeddings called //x-vectors//. Channel and gender distortions produce mismatch in such //x-vector// space where embeddings corresponding to the same language are not grouped in an unique cluster. To control this mismatch, we propose to train the //x-vector// DNN with metric learning objective functions. Combining a classification loss with the metric learning n-pair loss allows to improve the language recognition performance. Such a system achieves a robustness comparable to a system trained with a domain adaptation loss function but without using the domain information. We also analyze the mismatch due to channel and gender, in comparison to language proximity, in the //x-vector// space. This is achieved using the Maximum Mean Discrepancy divergence measure between groups of //x-vectors//. Our analysis shows that using the metric learning loss function reduces gender and channel mismatch in the //x-vector// space, even for languages only observed on one channel in the train set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Li|AUTHOR Zheng Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Jing Li|AUTHOR Jing Li]], [[Yiming Zhi|AUTHOR Yiming Zhi]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 452–456&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present our XMUSPEECH system for the oriental language recognition (OLR) challenge, AP19-OLR. The challenge this year contained three tasks: (1) short-utterance LID, (2) cross-channel LID, and (3) zero-resource LID. We leveraged the system pipeline from three aspects, including front-end training, back-end processing, and fusion strategy. We implemented many encoder networks for Tasks 1 and 3, such as extended x-vector, multi-task learning x-vector with phonetic information, and our previously presented multi-feature integration structure. Furthermore, our previously proposed length expansion method was used in the test set for Task 1. I-vector systems based on different acoustic features were built for the cross-channel task. For all of three tasks, the same back-end procedure was used for the sake of stability but with different settings for three tasks. Finally, the greedy fusion strategy helped to choose the subsystems to compose the final fusion systems (submitted systems). //Cavg// values of 0.0263, 0.2813, and 0.1697 from the development set for Task 1, 2, and 3 were obtained from our submitted systems, and we achieved rank //3rd//, //3rd//, and //1st// in the three tasks in this challenge, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Li|AUTHOR Zheng Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Jing Li|AUTHOR Jing Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 457–461&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we study the technology of multiple acoustic feature integration for the applications of Automatic Speaker Verification (ASV) and Language Identification (LID). In contrast to score level fusion, a common method for integrating subsystems built upon various acoustic features, we explore a new integration strategy, which integrates multiple acoustic features based on the x-vector framework. The frame level, statistics pooling level, segment level, and embedding level integrations are investigated in this study. Our results indicate that frame level integration of multiple acoustic features achieves the best performance in both speaker and language recognition tasks, and the multi-feature integration strategy can be generalized in both classification tasks. Furthermore, we introduce a time-restricted attention mechanism into the frame level integration structure to further improve the performance of multi-feature integration. The experiments are conducted on VoxCeleb 1 for ASV and AP-OLR-17 for LID, and we achieve 28% and 19% relative improvement in terms of Equal Error Rate (EER) in ASV and LID tasks, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shammur A. Chowdhury|AUTHOR Shammur A. Chowdhury]]^^1^^, [[Ahmed Ali|AUTHOR Ahmed Ali]]^^1^^, [[Suwon Shon|AUTHOR Suwon Shon]]^^2^^, [[James Glass|AUTHOR James Glass]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HBKU, Qatar; ^^2^^ASAPP, USA; ^^3^^MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 462–466&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An end-to-end dialect identification system generates the likelihood of each dialect, given a speech utterance. The performance relies on its capabilities to discriminate the acoustic properties between the different dialects, even though the input signal contains non-dialectal information such as speaker and channel. In this work, we study how non-dialectal information are encoded inside the end-to-end dialect identification model. We design several proxy tasks to understand the model’s ability to represent speech input for differentiating non-dialectal information — such as (a) gender and voice identity of speakers, (b) languages, (c) channel (recording and transmission) quality — and compare with dialectal information (i.e., predicting geographic region of the dialects). By analyzing non-dialectal representations from layers of an end-to-end Arabic dialect identification (ADI) model, we observe that the model retains gender and channel information throughout the network while learning a speaker-invariant representation. Our findings also suggest that the CNN layers of the end-to-end model mirror feature extractors capturing voice-specific information, while the fully-connected layers encode more dialectal information.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matias Lindgren|AUTHOR Matias Lindgren]]^^1^^, [[Tommi Jauhiainen|AUTHOR Tommi Jauhiainen]]^^2^^, [[Mikko Kurimo|AUTHOR Mikko Kurimo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^University of Helsinki, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 467–471&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a software toolkit for easier end-to-end training of deep learning based spoken language identification models across several speech datasets. We apply our toolkit to implement three baseline models, one speaker recognition model, and three x-vector architecture variations, which are trained on three datasets previously used in spoken language identification experiments. All models are trained separately on each dataset (closed task) and on a combination of all datasets (open task), after which we compare if the open task training yields better language embeddings. We begin by training all models end-to-end as discriminative classifiers of spectral features, labeled by language. Then, we extract language embedding vectors from the trained end-to-end models, train separate Gaussian Naive Bayes classifiers on the vectors, and compare which model provides best language embeddings for the backend classifier. Our experiments show that the open task condition leads to improved language identification performance on only one of the datasets. In addition, we discovered that increasing x-vector model robustness with random frequency channel dropout significantly reduces its end-to-end classification performance on the test set, while not affecting back-end classification performance of its embeddings. Finally, we note that two baseline models consistently outperformed all other models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aitor Arronte Alvarez|AUTHOR Aitor Arronte Alvarez]]^^1^^, [[Elsayed Sabry Abdelaal Issa|AUTHOR Elsayed Sabry Abdelaal Issa]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad Politécnica de Madrid, Spain; ^^2^^University of Arizona, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 472–476&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This article presents a full end-to-end pipeline for Arabic Dialect Identification (ADI) using intonation patterns and acoustic representations. Recent approaches to language and dialect identification use linguistic-aware deep architectures that are able to capture phonetic differences amongst languages and dialects. Specifically, in ADI tasks, different combinations of linguistic features and acoustic representations have been successful with deep learning models. The approach presented in this article uses intonation patterns and hybrid residual and bidirectional LSTM networks to learn acoustic embeddings with no additional linguistic information. Results of the experiments show that intonation patterns for Arabic dialects provide sufficient information to achieve state-of-the-art results on the VarDial 17 ADI dataset, outperforming single-feature systems. The pipeline presented is robust to data sparsity, in contrast to other deep learning approaches that require large quantities of data. We conjecture on the importance of sufficient information as a criterion for optimality in a deep learning ADI task, and more generally, its application to acoustic modeling problems. Small intonation patterns, when sufficient in an information-theoretic sense, allow deep learning architectures to learn more accurate speech representations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Badr M. Abdullah|AUTHOR Badr M. Abdullah]], [[Tania Avgustinova|AUTHOR Tania Avgustinova]], [[Bernd Möbius|AUTHOR Bernd Möbius]], [[Dietrich Klakow|AUTHOR Dietrich Klakow]]
</p><p class="cpabstractcardaffiliationlist">Universität des Saarlandes, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 477–481&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art spoken language identification (LID) systems, which are based on end-to-end deep neural networks, have shown remarkable success not only in discriminating between distant languages but also between closely-related languages or even different spoken varieties of the same language. However, it is still unclear to what extent neural LID models generalize to speech samples with different acoustic conditions due to domain shift. In this paper, we present a set of experiments to investigate the impact of domain mismatch on the performance of neural LID systems for a subset of six Slavic languages across two domains (read speech and radio broadcast) and examine two low-level signal descriptors (spectral and cepstral features) for this task. Our experiments show that (1) out-of-domain speech samples severely hinder the performance of neural LID models, and (2) while both spectral and cepstral features show comparable performance within-domain, spectral features show more robustness under domain mismatch. Moreover, we apply unsupervised domain adaptation to minimize the discrepancy between the two domains in our study. We achieve relative accuracy improvements that range from 9% to 77% depending on the diversity of acoustic conditions in the source domain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Noé Tits|AUTHOR Noé Tits]], [[Kevin El Haddad|AUTHOR Kevin El Haddad]], [[Thierry Dutoit|AUTHOR Thierry Dutoit]]
</p><p class="cpabstractcardaffiliationlist">Université de Mons, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 482–483&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>ICE-Talk is an open source¹ web-based GUI that allows the use of a TTS system with controllable parameters via a text field and a clickable 2D plot. It enables the study of latent spaces for controllable TTS. Moreover it is implemented as a module that can be used as part of a Human-Agent interaction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mathieu Hu|AUTHOR Mathieu Hu]], [[Laurent Pierron|AUTHOR Laurent Pierron]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Denis Jouvet|AUTHOR Denis Jouvet]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 484–485&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech provides an intuitive interface to communicate with machines. Today, developers willing to implement such an interface must either rely on third-party proprietary software or become experts in speech recognition. Conversely, researchers in speech recognition wishing to demonstrate their results need to be familiar with technologies that are not relevant to their research (e.g., graphical user interface libraries). In this demo, we introduce Kaldi-web¹: an open-source, cross-platform tool which bridges this gap by providing a user interface built around the online decoder of the Kaldi toolkit. Additionally, because we compile Kaldi to Web Assembly, speech recognition is performed directly in web browsers. This addresses privacy issues as no data is transmitted to the network for speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amelia C. Kelly|AUTHOR Amelia C. Kelly]], [[Eleni Karamichali|AUTHOR Eleni Karamichali]], [[Armin Saeb|AUTHOR Armin Saeb]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Nicholas Parslow|AUTHOR Nicholas Parslow]], [[Agape Deng|AUTHOR Agape Deng]], [[Arnaud Letondor|AUTHOR Arnaud Letondor]], [[Robert O’Regan|AUTHOR Robert O’Regan]], [[Qiru Zhou|AUTHOR Qiru Zhou]]
</p><p class="cpabstractcardaffiliationlist">SoapBox Labs, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 486–487&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>SoapBox Labs’ child speech verification platform is a service designed specifically for identifying keywords and phrases in children’s speech. Given an audio file containing children’s speech and one or more target keywords or phrases, the system will return the confidence score of recognition for the word(s) or phrase(s) within the the audio file. The confidence scores are provided at utterance level, word level and phoneme level. The service is available online through an cloud API service, or offline on Android and iOS. The platform is accurate for child speech from children as young as 3, and is robust to noisy environments. In this demonstration we show how to access the online API and give some examples of common use cases in literacy and language learning, gaming and robotics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amelia C. Kelly|AUTHOR Amelia C. Kelly]], [[Eleni Karamichali|AUTHOR Eleni Karamichali]], [[Armin Saeb|AUTHOR Armin Saeb]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Nicholas Parslow|AUTHOR Nicholas Parslow]], [[Gloria Montoya Gomez|AUTHOR Gloria Montoya Gomez]], [[Agape Deng|AUTHOR Agape Deng]], [[Arnaud Letondor|AUTHOR Arnaud Letondor]], [[Niall Mullally|AUTHOR Niall Mullally]], [[Adrian Hempel|AUTHOR Adrian Hempel]], [[Robert O’Regan|AUTHOR Robert O’Regan]], [[Qiru Zhou|AUTHOR Qiru Zhou]]
</p><p class="cpabstractcardaffiliationlist">SoapBox Labs, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 488–489&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The SoapBox Labs Fluency API service allows the automatic assessment of a child’s reading fluency. The system uses automatic speech recognition (ASR) to transcribe the child’s speech as they read a passage. The ASR output is then compared to the text of the reading passage, and the fluency algorithm returns information about the accuracy of the child’s reading attempt. In this show and tell paper we describe how the fluency cloud API is accessed and demonstrate how the fluency demo system processes an audio file, as shown in the accompanying video.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Baybars Külebi|AUTHOR Baybars Külebi]]^^1^^, [[Alp Öktem|AUTHOR Alp Öktem]]^^1^^, [[Alex Peiró-Lilja|AUTHOR Alex Peiró-Lilja]]^^2^^, [[Santiago Pascual|AUTHOR Santiago Pascual]]^^3^^, [[Mireia Farrús|AUTHOR Mireia Farrús]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Col•lectivaT, Spain; ^^2^^Universitat Pompeu Fabra, Spain; ^^3^^Dolby Laboratories, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 490–491&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present Catotron, a neural network-based open-source speech synthesis system in Catalan. Catotron consists of a sequence-to-sequence model trained with two small open-source datasets based on semi-spontaneous and read speech. We demonstrate how a neural TTS can be built for languages with limited resources using found-data optimization and cross-lingual transfer learning. We make the datasets, initial models and source code publicly available for both commercial and research purposes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vikram Ramanarayanan|AUTHOR Vikram Ramanarayanan]], [[Oliver Roesler|AUTHOR Oliver Roesler]], [[Michael Neumann|AUTHOR Michael Neumann]], [[David Pautler|AUTHOR David Pautler]], [[Doug Habberstad|AUTHOR Doug Habberstad]], [[Andrew Cornish|AUTHOR Andrew Cornish]], [[Hardik Kothare|AUTHOR Hardik Kothare]], [[Vignesh Murali|AUTHOR Vignesh Murali]], [[Jackson Liscombe|AUTHOR Jackson Liscombe]], [[Dirk Schnelle-Walka|AUTHOR Dirk Schnelle-Walka]], [[Patrick Lange|AUTHOR Patrick Lange]], [[David Suendermann-Oeft|AUTHOR David Suendermann-Oeft]]
</p><p class="cpabstractcardaffiliationlist">Modality.AI, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 492–493&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We demonstrate a multimodal conversational platform for remote patient diagnosis and monitoring. The platform engages patients in an interactive dialog session and automatically computes metrics relevant to speech acoustics and articulation, oro-motor and oro-facial movement, cognitive function and respiratory function. The dialog session includes a selection of exercises that have been widely used in both speech language pathology research as well as clinical practice — an oral motor exam, sustained phonation, diadochokinesis, read speech, spontaneous speech, spirometry, picture description, emotion elicitation and other cognitive tasks. Finally, the system automatically computes speech, video, cognitive and respiratory biomarkers that have been shown to be useful in capturing various aspects of speech motor function and neurological health and visualizes them in a user-friendly dashboard.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Baihan Lin|AUTHOR Baihan Lin]], [[Xinxin Zhang|AUTHOR Xinxin Zhang]]
</p><p class="cpabstractcardaffiliationlist">University of Washington, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 494–495&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We proposed a novel AI framework to conduct real-time multi-speaker recognition without any prior registration or pretraining by learning the speaker identification on the fly. We considered the practical problem of online learning with episodically revealed rewards and introduced a solution based on semi-supervised and self-supervised learning methods in a web-based application at https://www.baihan.nyc/viz/VoiceID/</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guanjun Li|AUTHOR Guanjun Li]]^^1^^, [[Shan Liang|AUTHOR Shan Liang]]^^1^^, [[Shuai Nie|AUTHOR Shuai Nie]]^^1^^, [[Wenju Liu|AUTHOR Wenju Liu]]^^1^^, [[Zhanlei Yang|AUTHOR Zhanlei Yang]]^^2^^, [[Longshuai Xiao|AUTHOR Longshuai Xiao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 51–55&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The elastic spatial filter (ESF) proposed in recent years is a popular multi-channel speech enhancement front end based on deep neural network (DNN). It is suitable for real-time processing and has shown promising automatic speech recognition (ASR) results. However, the ESF only utilizes the knowledge of fixed beamforming, resulting in limited noise reduction capabilities. In this paper, we propose a DNN-based generalized sidelobe canceller (GSC) that can automatically track the target speaker’s direction in real time and use the blocking technique to generate reference noise signals to further reduce noise from the fixed beam pointing to the target direction. The coefficients in the proposed GSC are fully learnable and an ASR criterion is used to optimize the entire network. The 4-channel experiments show that the proposed GSC achieves a relative word error rate improvement of 27.0% compared to the raw observation, 20.6% compared to the oracle direction-based traditional GSC, 10.5% compared to the ESF and 7.9% compared to the oracle mask-based generalized eigenvalue (GEV) beamformer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yan-Hui Tu|AUTHOR Yan-Hui Tu]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Lei Sun|AUTHOR Lei Sun]]^^1^^, [[Feng Ma|AUTHOR Feng Ma]]^^1^^, [[Jia Pan|AUTHOR Jia Pan]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 96–100&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a space-and-speaker-aware iterative mask estimation (SSA-IME) approach to improving complex angular central Gaussian distributions (cACGMM) based beamforming in an iterative manner by leveraging upon the complementary information obtained from SSA-based regression. First, a mask calculated by beamformed speech features is proposed to enhance the estimation accuracy of the ideal ratio mask from noisy speech. Second, the outputs of cACGMM-beamformed speech with given time annotation as initial values are used to extract the log-power spectral and inter-phase difference features of different speakers serving as inputs to estimate the regression-based SSA model. Finally, in decoding, the mask estimated by the SSA model is also used to iteratively refine cACGMM-based masks, yielding enhanced multi-array speech. Tested on the recent CHiME-6 Challenge Track 1 tasks, the proposed SSA-IME framework significantly and consistently outperforms state-of-the-art approaches, and achieves the lowest word error rates for both Track 1 speech recognition tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yong Xu|AUTHOR Yong Xu]]^^1^^, [[Meng Yu|AUTHOR Meng Yu]]^^1^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^1^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^2^^, [[Chao Weng|AUTHOR Chao Weng]]^^1^^, [[Jianming Liu|AUTHOR Jianming Liu]]^^1^^, [[Dong Yu|AUTHOR Dong Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, USA; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 56–60&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Purely neural network (NN) based speech separation and enhancement methods, although can achieve good objective scores, inevitably cause nonlinear speech distortions that are harmful for the automatic speech recognition (ASR). On the other hand, the minimum variance distortionless response (MVDR) beamformer with NN-predicted masks, although can significantly reduce speech distortions, has limited noise reduction capability. In this paper, we propose a multi-tap MVDR beamformer with complex-valued masks for speech separation and enhancement. Compared to the state-of-the-art NN-mask based MVDR beamformer, the multi-tap MVDR beamformer exploits the inter-frame correlation in addition to the inter-microphone correlation that is already utilized in prior arts. Further improvements include the replacement of the real-valued masks with the complex-valued masks and the joint training of the complex-mask NN. The evaluation on our multi-modal multi-channel target speech separation and enhancement platform demonstrates that our proposed multi-tap MVDR beamformer improves both the ASR accuracy and the perceptual speech quality against prior arts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li Li|AUTHOR Li Li]]^^1^^, [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]^^2^^, [[Shoji Makino|AUTHOR Shoji Makino]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tsukuba, Japan; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 61–65&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes an online dual-microphone system for directional speech enhancement, which employs geometrically constrained independent vector analysis (IVA) based on the auxiliary function approach and vectorwise coordinate descent. Its offline version has recently been proposed and shown to outperform the conventional auxiliary function approach-based IVA (AuxIVA) thanks to the properly designed spatial constraints. We extend the offline algorithm to online by incorporating the autoregressive approximation of an auxiliary variable. Experimental evaluations revealed that the proposed online algorithm could work in real-time and achieved superior speech enhancement performance to online AuxIVA in both situations where a fixed target was interfered by a spatially stationary or dynamic interference.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meng Yu|AUTHOR Meng Yu]]^^1^^, [[Xuan Ji|AUTHOR Xuan Ji]]^^2^^, [[Bo Wu|AUTHOR Bo Wu]]^^2^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, USA; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 66–70&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of keyword spotting (KWS), measured in false alarms and false rejects, degrades significantly under the far field and noisy conditions. In this paper, we propose a multi-look neural network modeling for speech enhancement which simultaneously steers to listen to multiple sampled look directions. The multi-look enhancement is then jointly trained with KWS to form an end-to-end KWS model which integrates the enhanced signals from multiple look directions and leverages an attention mechanism to dynamically tune the model’s attention to the reliable sources. We demonstrate, on our large noisy and far-field evaluation sets, that the proposed approach significantly improves the KWS performance against the baseline KWS system and a recent beamformer based multi-beam KWS system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weilong Huang|AUTHOR Weilong Huang]]^^1^^, [[Jinwei Feng|AUTHOR Jinwei Feng]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, China; ^^2^^Alibaba Group, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 71–75&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Use of omni-directional microphones is commonly assumed in the differential beamforming with uniform circular arrays. The conventional differential beamforming with omni-directional elements tends to suffer in low white-noise-gain (WNG) at the low frequencies and decrease of directivity factor (DF) at high frequencies. WNG measures the robustness of beamformer and DF evaluates the array performance in the presence of reverberation. The major contributions of this paper are as follows: First, we extends the existing work by presenting a new approach with the use of the directional microphone elements, and show clearly the connection between the conventional beamforming and the proposed beamforming. Second, a comparative study is made to show that the proposed approach brings about the noticeable improvement in WNG at the low frequencies and some improvement in DF at the high frequencies by exploiting an additional degree of freedom in the differential beamforming design. In addition, the beampattern appears more frequency-invariant than that of the conventional method. Third, we study how the proposed beamformer performs as the number of microphone elements and the radius of the array vary.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jun Qi|AUTHOR Jun Qi]]^^1^^, [[Hu Hu|AUTHOR Hu Hu]]^^1^^, [[Yannan Wang|AUTHOR Yannan Wang]]^^2^^, [[Chao-Han Huck Yang|AUTHOR Chao-Han Huck Yang]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Georgia Tech, USA; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 76–80&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates different trade-offs between the number of model parameters and enhanced speech qualities by employing several deep tensor-to-vector regression models for speech enhancement. We find that a hybrid architecture, namely CNN-TT, is capable of maintaining a good quality performance with a reduced model parameter size. CNN-TT is composed of several convolutional layers at the bottom for feature extraction to improve speech quality and a tensor-train (TT) output layer on the top to reduce model parameters. We first derive a new upper bound on the generalization power of the convolutional neural network (CNN) based vector-to-vector regression models. Then, we provide experimental evidence on the Edinburgh noisy speech corpus to demonstrate that, in single-channel speech enhancement, CNN outperforms DNN at the expense of a small increment of model sizes. Besides, CNN-TT slightly outperforms the CNN counterpart by utilizing only 32% of the CNN model parameters. Besides, further performance improvement can be attained if the number of CNN-TT parameters is increased to 44% of the CNN model size. Finally, our experiments of multi-channel speech enhancement on a simulated noisy WSJ0 corpus demonstrate that our proposed hybrid CNN-TT architecture achieves better results than both DNN and CNN models in terms of better-enhanced speech qualities and smaller parameter sizes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Wu|AUTHOR Jian Wu]]^^1^^, [[Zhuo Chen|AUTHOR Zhuo Chen]]^^2^^, [[Jinyu Li|AUTHOR Jinyu Li]]^^2^^, [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]^^2^^, [[Zhili Tan|AUTHOR Zhili Tan]]^^3^^, [[Edward Lin|AUTHOR Edward Lin]]^^3^^, [[Yi Luo|AUTHOR Yi Luo]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Microsoft, USA; ^^3^^Microsoft, China; ^^4^^Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 81–85&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-speaker speech recognition has been one of the key challenges in conversation transcription as it breaks the single active speaker assumption employed by most state-of-the-art speech recognition systems. Speech separation is considered as a remedy to this problem. Previously, we introduced a system, called //unmixing, fixed-beamformer// and //extraction// (UFE), that was shown to be effective in addressing the speech overlap problem in conversation transcription. With UFE, an input mixed signal is processed by fixed beamformers, followed by a neural network post filtering. Although promising results were obtained, the system contains multiple individually developed modules, leading potentially sub-optimum performance. In this work, we introduce an end-to-end modeling version of UFE. To enable gradient propagation all the way, an attentional selection module is proposed, where an attentional weight is learnt for each beamformer and spatial feature sampled over space. Experimental results show that the proposed system achieves comparable performance in an offline evaluation with the original separate processing-based pipeline, while producing remarkable improvements in an online evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu Nakagome|AUTHOR Yu Nakagome]]^^1^^, [[Masahito Togami|AUTHOR Masahito Togami]]^^2^^, [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]^^1^^, [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Waseda University, Japan; ^^2^^LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 86–90&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>//Mentoring-reverse mentoring//, which is a novel knowledge transfer framework for unsupervised learning, is introduced in multi-channel speech source separation. This framework aims to improve two different systems, which are referred to as a //senior// and a //junior// system, by mentoring each other. The senior system, which is composed of a neural separator and a statistical blind source separation (BSS) model, generates a pseudo-target signal. The junior system, which is composed of a neural separator and a post-filter, was constructed using teacher-student learning with the pseudo-target signal generated from the senior system i.e, imitating the output from the senior system (mentoring step). Then, the senior system can be improved by propagating the shared neural separator of the grown-up junior system to the senior system (reverse mentoring step). Since the improved neural separator can give better initial parameters for the statistical BSS model, the senior system can yield more accurate pseudo-target signals, leading to iterative improvement of the pseudo-target signal generator and the neural separator. Experimental comparisons conducted under the condition where mixture-clean parallel data are not available demonstrated that the proposed mentoring-reverse mentoring framework yielded improvements in speech source separation over the existing unsupervised source separation methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Rintaro Ikeshita|AUTHOR Rintaro Ikeshita]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Hiroshi Sawada|AUTHOR Hiroshi Sawada]], [[Shoko Araki|AUTHOR Shoko Araki]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 91–95&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes new blind signal processing techniques for optimizing a multi-input multi-output (MIMO) convolutional beamformer (CBF) in a computationally efficient way to simultaneously perform dereverberation and source separation. For effective CBF optimization, a conventional technique factorizes it into a multiple-target weighted prediction error (WPE) based dereverberation filter and a separation matrix. However, this technique requires the calculation of a huge spatio-temporal covariance matrix that reflects the statistics of all the sources, which makes the computational cost very high. For computationally efficient optimization, this paper introduces two techniques: one that decomposes the huge covariance matrix into ones for individual sources, and another that decomposes the CBF into sub-filters for estimating individual sources. Both techniques effectively and substantively reduce the size of the covariance matrices that must calculated, and allow us to greatly reduce the computational cost without loss of optimality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hmamouche Youssef|AUTHOR Hmamouche Youssef]]^^1^^, [[Prévot Laurent|AUTHOR Prévot Laurent]]^^2^^, [[Ochs Magalie|AUTHOR Ochs Magalie]]^^1^^, [[Chaminade Thierry|AUTHOR Chaminade Thierry]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIS (UMR 7020), France; ^^2^^LPL (UMR 7309), France; ^^3^^INT (UMR 7289), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 101–105&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Characterizing precisely neurophysiological activity involved in natural conversations remains a major challenge. We explore in this paper the relationship between multimodal conversational behavior and brain activity during natural conversations. This is challenging due to Functional Magnetic Resonance Imaging (fMRI) time resolution and to the diversity of the recorded multimodal signals. We use a unique corpus including localized brain activity and behavior recorded during a fMRI experiment when several participants had natural conversations alternatively with a human and a conversational robot. The corpus includes fMRI responses as well as conversational signals that consist of synchronized raw audio and their transcripts, video and eye-tracking recordings. The proposed approach includes a first step to extract discrete neurophysiological time-series from functionally well defined brain areas, as well as behavioral time-series describing specific behaviors. Then, machine learning models are applied to predict neurophysiological time-series based on the extracted behavioral features. The results show promising prediction scores, and specific causal relationships are found between behaviors and the activity in functional brain areas for both conditions, i.e., human-human and human-robot conversations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Di Zhou|AUTHOR Di Zhou]]^^1^^, [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]]^^2^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^3^^, [[Shuang Wu|AUTHOR Shuang Wu]]^^2^^, [[Zhuo Zhang|AUTHOR Zhuo Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JAIST, Japan; ^^2^^Tianjin University, China; ^^3^^JAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 106–110&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Reconstruction of speech envelope from neural signal is a general way to study neural entrainment, which helps to understand the neural mechanism underlying speech processing. Previous neural entrainment studies were mainly based on single-trial neural activities, and the reconstruction accuracy of speech envelope is not high enough, probably due to the interferences from diverse noises such as breath and heartbeat. Considering that such noises independently emerge in the consistent neural processing of the subjects responding to the same speech stimulus, we proposed a method to align and average electroencephalograph (EEG) signals of the subjects for the same stimuli to reduce the noises of neural signals. Pearson correlation of constructed speech envelops with the original ones showed a great improvement comparing to the single-trial based method. Our study improved the correlation coefficient in delta band from around 0.25 to 0.5, where 0.25 was obtained in previous leading studies based on single-trial. The speech tracking phenomenon not only occurred in the commonly reported delta and theta band, but also occurred in the gamma band of EEG. Moreover, the reconstruction accuracy for regular speech was higher than that for the time-reversed speech, suggesting that neural entrainment to natural speech envelope reflects speech semantics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chongyuan Lian|AUTHOR Chongyuan Lian]]^^1^^, [[Tianqi Wang|AUTHOR Tianqi Wang]]^^1^^, [[Mingxiao Gu|AUTHOR Mingxiao Gu]]^^1^^, [[Manwa L. Ng|AUTHOR Manwa L. Ng]]^^2^^, [[Feiqi Zhu|AUTHOR Feiqi Zhu]]^^3^^, [[Lan Wang|AUTHOR Lan Wang]]^^1^^, [[Nan Yan|AUTHOR Nan Yan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^HKU, China; ^^3^^Shenzhen Luohu People’s Hospital, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 111–115&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Alterations in speech and language are typical signs of mild cognitive impairment (MCI), considered to be the prodromal stage of Alzheimer’s disease (AD). Yet, very few studies have pointed out at what stage their speech production is disrupted. To bridge this knowledge gap, the present study focused on lexical retrieval, a specific process during speech production, and investigated how it is affected in cognitively impairment patients with the state-of-the-art analysis of brain functional network. 17 patients with MCI and 20 age-matched controls were invited to complete a primed picture naming task, of which the prime was either semantically related or unrelated to the target. Using electroencephalography (EEG) signals collected during task performance, even-related potentials (ERPs) were analyzed, together with the construction of the brain functional network. Results showed that whereas MCI patients did not exhibit significant differences in reaction time and ERP responses, their brain functional network did alter associated with a significant main effect in accuracy. The observation of increased cluster coefficients and characteristic path length indicated deteriorations in global information processing, which provided evidence that deficits in lexical retrieval might have occurred even at the preclinical stage of AD.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhen Fu|AUTHOR Zhen Fu]], [[Jing Chen|AUTHOR Jing Chen]]
</p><p class="cpabstractcardaffiliationlist">Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 116–120&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Listeners usually have the ability to selectively attend to the target speech while ignoring competing sounds. The mechanism that top-down attention modulates the cortical envelope tracking to speech was proposed to account for this ability. Additional visual input, such as lipreading was considered beneficial for speech perception, especially in noise. However, the effect of audiovisual (AV) congruency on the dynamic properties of cortical envelope tracking activities was not discussed explicitly. And the involvement of cortical regions processing AV speech was unclear. To solve these issues, electroencephalography (EEG) was recorded while participants attending to one talker from a mixture for several AV conditions (audio-only, congruent and incongruent). Approaches of temporal response functions (TRFs) and inter-trial phase coherence (ITPC) analysis were utilized to index the cortical envelope tracking for each condition. Comparing with the audio-only condition, both indices were enhanced only for the congruent AV condition, and the enhancement was prominent over both the auditory and visual cortex. In addition, timings of different cortical regions involved in cortical envelope tracking activities were subject to stimulus modality. The present work provides new insight into the neural mechanisms of auditory selective attention when visual input is available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lei Wang|AUTHOR Lei Wang]]^^1^^, [[Ed X. Wu|AUTHOR Ed X. Wu]]^^2^^, [[Fei Chen|AUTHOR Fei Chen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SUSTech, China; ^^2^^HKU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 121–124&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human listeners can recognize target speech streams in complex auditory scenes. The cortical activities can robustly track the amplitude fluctuations of target speech with auditory attentional modulation under a range of signal-to-masker ratios (SMRs). The root-mean-square (RMS) level of the speech signal is a crucial acoustic cue for target speech perception. However, in most studies, the neural-tracking activities were analyzed with the intact speech temporal envelopes, ignoring the characteristic decoding features in different RMS-level-specific speech segments. This study aimed to explore the contributions of high- and middle-RMS-level segments to target speech decoding in noisy conditions based on electroencephalogram (EEG) signals. The target stimulus was mixed with a competing speaker at five SMRs (i.e., 6, 3, 0, -3, and -6 dB), and then the temporal response function (TRF) was used to analyze the relationship between neural responses and high/middle-RMS-level segments. Experimental results showed that target and ignored speech streams had significantly different TRF responses under conditions with the high- or middle-RMS-level segments. Besides, the high- and middle-RMS-level segments elicited different TRF responses in morphological distributions. These results suggested that distinct models could be used in different RMS-level-specific speech segments to better decode target speech with corresponding EEG signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bin Zhao|AUTHOR Bin Zhao]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]]^^2^^, [[Masashi Unoki|AUTHOR Masashi Unoki]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JAIST, Japan; ^^2^^Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 125–129&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human speech processing, either for listening or oral reading, requires dynamic cortical activities that are not only driven by sensory stimuli externally but also influenced by semantic knowledge and speech planning goals internally. Each of these functions has been known to accompany specific rhythmic oscillations and be localized in distributed networks. The question is how the brain organizes these spatially and spectrally distinct functional networks in such a temporal precision that endows us with incredible speech abilities. For clarification, this study conducted an oral reading task with natural sentences and collected simultaneously the involved brain waves, eye movements, and speech signals with high-density EEG and eye movement equipment. By examining the regional oscillatory spectral perturbation and modeling the frequency-specific interregional connections, our results revealed a hierarchical oscillatory mechanism, in which gamma oscillation entrains with the fine-structured sensory input while beta oscillation modulated the sensory output. Alpha oscillation mediated between sensory perception and cognitive function via selective suppression. Theta oscillation synchronized local networks for large-scale coordination. Differing from a single function-frequency-correspondence, the coexistence of multi-frequency oscillations was found to be critical for local regions to communicate remotely and diversely in a larger network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Louis ten Bosch|AUTHOR Louis ten Bosch]], [[Kimberley Mulder|AUTHOR Kimberley Mulder]], [[Lou Boves|AUTHOR Lou Boves]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 130–134&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In processing behavioral data from auditory lexical decision, reaction times (RT) can be defined relative to stimulus onset or relative to stimulus offset. Using stimulus onset as the reference invokes models that assumes that relevant processing starts immediately, while stimulus offset invokes models that assume that relevant processing can only start when the acoustic input is complete. It is suggested that EEG recordings can be used to tear apart putative processes. EEG analysis requires some kind of time-locking of epochs, so that averaging of multiple signals does not mix up effects of different processes. However, in many lexical decision experiments the duration of the speech stimuli varies substantially. Consequently, processes tied to stimulus offset are not appropriately aligned and might get lost in the averaging process. In this paper we investigate whether the time course of putative processes such as phonetic encoding, lexical access and decision making can be derived from ERPs and from instantaneous power representations in several frequency bands when epochs are time-locked at stimulus onset or stimulus offset. In addition, we investigate whether time-locking at the moment when the response is given can shed light on the decision process per sé.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tanya Talkar|AUTHOR Tanya Talkar]]^^1^^, [[Sophia Yuditskaya|AUTHOR Sophia Yuditskaya]]^^2^^, [[James R. Williamson|AUTHOR James R. Williamson]]^^2^^, [[Adam C. Lammert|AUTHOR Adam C. Lammert]]^^3^^, [[Hrishikesh Rao|AUTHOR Hrishikesh Rao]]^^2^^, [[Daniel Hannon|AUTHOR Daniel Hannon]]^^2^^, [[Anne O’Brien|AUTHOR Anne O’Brien]]^^4^^, [[Gloria Vergara-Diaz|AUTHOR Gloria Vergara-Diaz]]^^4^^, [[Richard DeLaura|AUTHOR Richard DeLaura]]^^2^^, [[Douglas Sturim|AUTHOR Douglas Sturim]]^^2^^, [[Gregory Ciccarelli|AUTHOR Gregory Ciccarelli]]^^2^^, [[Ross Zafonte|AUTHOR Ross Zafonte]]^^4^^, [[Jeffrey Palmer|AUTHOR Jeffrey Palmer]]^^2^^, [[Paolo Bonato|AUTHOR Paolo Bonato]]^^4^^, [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harvard University, USA; ^^2^^MIT Lincoln Laboratory, USA; ^^3^^Worcester Polytechnic Institute, USA; ^^4^^Spaulding Rehabilitation Hospital, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 135–139&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Between 15% to 40% of mild traumatic brain injury (mTBI) patients experience incomplete recoveries or provide subjective reports of decreased motor abilities, despite a clinically-determined complete recovery. This demonstrates a need for objective measures capable of detecting subclinical residual mTBI, particularly in return-to-duty decisions for warfighters and return-to-play decisions for athletes. In this paper, we utilize features from recordings of directed speech and gait tasks completed by ten healthy controls and eleven subjects with lingering subclinical impairments from an mTBI. We hypothesize that decreased coordination and precision during fine motor movements governing speech production (articulation, phonation, and respiration), as well as during gross motor movements governing gait, can be effective indicators of subclinical mTBI. Decreases in coordination are measured from correlations of vocal acoustic feature time series and torso acceleration time series. We apply eigenspectra derived from these correlations to machine learning models to discriminate between the two subject groups. The fusion of correlation features derived from acoustic and gait time series achieve an AUC of 0.98. This highlights the potential of using the combination of vocal acoustic features from speech tasks and torso acceleration during a simple gait task as a rapid screening tool for subclinical mTBI.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joel Shor|AUTHOR Joel Shor]]^^1^^, [[Aren Jansen|AUTHOR Aren Jansen]]^^2^^, [[Ronnie Maor|AUTHOR Ronnie Maor]]^^1^^, [[Oran Lang|AUTHOR Oran Lang]]^^1^^, [[Omry Tuval|AUTHOR Omry Tuval]]^^1^^, [[Félix de Chaumont Quitry|AUTHOR Félix de Chaumont Quitry]]^^3^^, [[Marco Tagliasacchi|AUTHOR Marco Tagliasacchi]]^^3^^, [[Ira Shavitt|AUTHOR Ira Shavitt]]^^1^^, [[Dotan Emanuel|AUTHOR Dotan Emanuel]]^^1^^, [[Yinnon Haviv|AUTHOR Yinnon Haviv]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, Israel; ^^2^^Google, USA; ^^3^^Google, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 140–144&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark⁴, models⁵, and evaluation code⁶ are publicly released.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hirotoshi Takeuchi|AUTHOR Hirotoshi Takeuchi]]^^1^^, [[Kunio Kashino|AUTHOR Kunio Kashino]]^^2^^, [[Yasunori Ohishi|AUTHOR Yasunori Ohishi]]^^2^^, [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tokyo, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 185–189&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional neural networks have been successfully applied to a variety of audio signal processing tasks including sound source separation, speech recognition and acoustic scene understanding. Since many pitched sounds have a harmonic structure, an operation, called harmonic convolution, has been proposed to take advantages of the structure appearing in the audio signals. However, the computational cost involved is higher than that of normal convolution. This paper proposes a faster calculation method of harmonic convolution called Harmonic Lowering. The method unrolls the input data to a redundant layout so that the normal convolution operation can be applied. The analysis of the runtimes and the number of multiplication operations show that the proposed method accelerates the harmonic convolution 2 to 7 times faster than the conventional method under realistic parameter settings, while no approximation is introduced.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rajeev Rajan|AUTHOR Rajeev Rajan]]^^1^^, [[Aiswarya Vinod Kumar|AUTHOR Aiswarya Vinod Kumar]]^^1^^, [[Ben P. Babu|AUTHOR Ben P. Babu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^College of Engineering Trivandrum, India; ^^2^^RGIT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 145–149&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a deep neural network (DNN)-based poetic meter classification scheme is proposed using a fusion of musical texture features (MTF) and i-vectors. The experiment is performed in two phases. Initially, the mel-frequency cepstral coefficient (MFCC) features are fused with MTF and classification is done using DNN. MTF include timbral, rhythmic, and melodic features. Later, in the second phase, the MTF is fused with i-vectors and classification is performed. The performance is evaluated using a newly created poetic corpus in Malayalam, one of the prominent languages in India. While the MFCC-MTF/DNN system reports an overall accuracy of 80.83%, the i-vector/MTF fusion reports an overall accuracy of 86.66%. The performance is also compared with a baseline support vector machine (SVM)-based classifier. The results show that the architectural choice of i-vector fusion with MTF on DNN has merit in recognizing meters from recited poems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wang Dai|AUTHOR Wang Dai]]^^1^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^1^^, [[Yingming Gao|AUTHOR Yingming Gao]]^^2^^, [[Wei Wei|AUTHOR Wei Wei]]^^1^^, [[Dengfeng Ke|AUTHOR Dengfeng Ke]]^^3^^, [[Binghuai Lin|AUTHOR Binghuai Lin]]^^4^^, [[Yanlu Xie|AUTHOR Yanlu Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BLCU, China; ^^2^^Technische Universität Dresden, Germany; ^^3^^CAS, China; ^^4^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 150–154&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Formant tracking is one of the most fundamental problems in speech processing. Traditionally, formants are estimated using signal processing methods. Recent studies showed that generic convolutional architectures can outperform recurrent networks on temporal tasks such as speech synthesis and machine translation. In this paper, we explored the use of Temporal Convolutional Network (TCN) for formant tracking. In addition to the conventional implementation, we modified the architecture from three aspects. First, we turned off the “causal” mode of dilated convolution, making the dilated convolution see the future speech frames. Second, each hidden layer reused the output information from //all// the previous layers through dense connection. Third, we also adopted a gating mechanism to alleviate the problem of gradient disappearance by selectively forgetting unimportant information. The model was validated on the open access formant database VTR. The experiment showed that our proposed model was easy to converge and achieved an overall mean absolute percent error (MAPE) of 8.2% on speech-labeled frames, compared to three competitive baselines of 9.4% (LSTM), 9.1% (Bi-LSTM) and 8.9% (TCN).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Na Hu|AUTHOR Na Hu]]^^1^^, [[Berit Janssen|AUTHOR Berit Janssen]]^^1^^, [[Judith Hanssen|AUTHOR Judith Hanssen]]^^2^^, [[Carlos Gussenhoven|AUTHOR Carlos Gussenhoven]]^^3^^, [[Aoju Chen|AUTHOR Aoju Chen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universiteit Utrecht, The Netherlands; ^^2^^Avans Hogeschool, The Netherlands; ^^3^^Radboud Universiteit, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 155–159&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present a publicly available tool for automatic analysis of speech prosody (AASP) in Dutch. Incorporating the state-of-the-art analytical frameworks, AASP enables users to analyze prosody at two levels from different theoretical perspectives. Holistically, by means of the Functional Principal Component Analysis (FPCA) it generates mathematical functions that capture changes in the shape of a pitch contour. The tool outputs the weights of principal components in a table for users to process in further statistical analysis. Structurally, AASP analyzes prosody in terms of prosodic events within the auto-segmental metrical framework, hypothesizing prosodic labels in accordance with Transcription of Dutch Intonation (ToDI) with accuracy comparable to similar tools for other languages. Published as a Docker container, the tool can be set up on various operating systems in only two steps. Moreover, the tool is accessed through a graphic user interface, making it accessible to users with limited programming skills.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adrien Gresse|AUTHOR Adrien Gresse]], [[Mathias Quillot|AUTHOR Mathias Quillot]], [[Richard Dufour|AUTHOR Richard Dufour]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]
</p><p class="cpabstractcardaffiliationlist">LIA (EA 4128), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 160–164&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The search for professional voice-actors for audiovisual productions is a sensitive task, performed by the artistic directors (ADs). The ADs have a strong appetite for new talents/voices but cannot perform large scale auditions. Automatic tools able to suggest the most suited voices are of a great interest for audiovisual industry. In previous works, we showed the existence of acoustic information allowing to mimic the AD’s choices. However, the only available information is the ADs’ choices from the already dubbed multimedia productions. In this paper, we propose a representation-learning based strategy to build a character/role representation, called p-vector. In addition, the large variability between audiovisual productions makes it difficult to have homogeneous training datasets. We overcome this difficulty by using knowledge distillation methods to take advantage of external datasets. Experiments are conducted on video-game voice excerpts. Results show a significant improvement using the p-vector, compared to the speaker-based x-vector representation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[B. Yegnanarayana|AUTHOR B. Yegnanarayana]], [[Anand Joseph|AUTHOR Anand Joseph]], [[Vishala Pannala|AUTHOR Vishala Pannala]]
</p><p class="cpabstractcardaffiliationlist">IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 165–169&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Formants are resonances of the time varying vocal tract system, and their characteristics are reflected in the response of the system for a sequence of impulse-like excitation sequence originated at the glottis. This paper presents a method to enhance the formants information in the display of spectrogram of the speech signal, especially for high pitched voices. It is well known that in the narrowband spectrogram, the presence of pitch harmonics masks the formant information, whereas in the wideband spectrogram, the formant regions are smeared. Using single frequency filtering (SFF) analysis, we show that the wideband equivalent SFF spectrogram can be modified to enhance the formant information in the display by improving the frequency resolution. For this, we obtain two SFF spectrograms by using single frequency filtering of the speech signal at two closely spaced roots on the real axis in the z-plane. The ratio or difference of the two SFF spectrograms is processed to enhance the formant information in the spectrographic display. This will help in tracking rapidly changing formants and in resolving closely spaced formants. The effect is more pronounced in the case of high-pitched voices, like female and children speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michael Gump|AUTHOR Michael Gump]], [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 170–174&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Disentanglement is a desired property in representation learning and a significant body of research has tried to show that it is a useful representational prior. Evaluating disentanglement is challenging, particularly for real world data like speech, where ground truth generative factors are typically not available. Previous work on disentangled representation learning in speech has used categorical supervision like phoneme or speaker identity in order to disentangle grouped feature spaces. However, this work differs from the typical dimension-wise view of disentanglement in other domains. This paper proposes to use low-level acoustic features to provide the structure required to evaluate dimension-wise disentanglement. By choosing well-studied acoustic features, grounded and descriptive evaluation is made possible for unsupervised representation learning. This work produces a toolkit for evaluating disentanglement in unsupervised representations of speech and evaluates its efficacy on previous research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dung N. Tran|AUTHOR Dung N. Tran]], [[Uros Batricevic|AUTHOR Uros Batricevic]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 175–179&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Accurate voiced/unvoiced information is crucial in estimating the pitch of a target speech signal in severe nonstationary noise environments. Nevertheless, state-of-the-art pitch estimators based on deep neural networks (DNN) lack a dedicated mechanism for robustly detecting voiced and unvoiced segments in the target speech in noisy conditions. In this work, we proposed an end-to-end deep learning-based pitch estimation framework which jointly detects voiced/unvoiced segments and predicts pitch values for the voiced regions of the ground-truth speech. We empirically showed that our proposed framework significantly more robust than state-of-the-art DNN based pitch detectors in nonstationary noise settings. Our results suggest that joint training of voiced/unvoiced detection and voiced pitch prediction can significantly improve pitch estimation performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amrith Setlur|AUTHOR Amrith Setlur]], [[Barnabás Póczos|AUTHOR Barnabás Póczos]], [[Alan W. Black|AUTHOR Alan W. Black]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 180–184&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper extends recent work on nonlinear Independent Component Analysis (ICA) by introducing a theoretical framework for nonlinear Independent Subspace Analysis (ISA) in the presence of auxiliary variables. Observed high dimensional acoustic features like log Mel spectrograms can be considered as surface level manifestations of nonlinear transformations over individual multivariate sources of information like speaker characteristics, phonological content etc. Under assumptions of energy based models we use the theory of nonlinear ISA to propose an algorithm that learns unsupervised speech representations whose subspaces are independent and potentially highly correlated with the original non-stationary multivariate sources. We show how nonlinear ICA with auxiliary variables can be extended to a generic identifiable model for subspaces as well while also providing sufficient conditions for the identifiability of these high dimensional subspaces. Our proposed methodology is generic and can be integrated with standard unsupervised approaches to learn speech representations with subspaces that can theoretically capture independent higher order speech signals. We evaluate the gains of our algorithm when integrated with the Autoregressive Predictive Coding (APC) model by showing empirical results on the speaker verification and phoneme recognition tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Ai|AUTHOR Yang Ai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 190–194&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In our previous work, we have proposed a neural vocoder called HiNet which recovers speech waveforms by predicting amplitude and phase spectra hierarchically from input acoustic features. In HiNet, the amplitude spectrum predictor (ASP) predicts log amplitude spectra (LAS) from input acoustic features. This paper proposes a novel knowledge-and-data-driven ASP (KDD-ASP) to improve the conventional one. First, acoustic features (i.e., F0 and mel-cepstra) pass through a knowledge-driven LAS recovery module to obtain approximate LAS (ALAS). This module is designed based on the combination of STFT and source-filter theory, in which the source part and the filter part are designed based on input F0 and mel-cepstra, respectively. Then, the recovered ALAS are processed by a data-driven LAS refinement module which consists of multiple trainable convolutional layers to get the final LAS. Experimental results show that the HiNet vocoder using KDD-ASP can achieve higher quality of synthetic speech than that using conventional ASP and the WaveRNN vocoder on a text-to-speech (TTS) task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]]^^1^^, [[Yannis Pantazis|AUTHOR Yannis Pantazis]]^^2^^, [[Yannis Stylianou|AUTHOR Yannis Stylianou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Crete, Greece; ^^2^^FORTH, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 235–239&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advancements in deep learning led to human-level performance in single-speaker speech synthesis. However, there are still limitations in terms of speech quality when generalizing those systems into multiple-speaker models especially for unseen speakers and unseen recording qualities. For instance, conventional neural vocoders are adjusted to the training speaker and have poor generalization capabilities to unseen speakers. In this work, we propose a variant of WaveRNN, referred to as speaker conditional WaveRNN (SC-WaveRNN). We target towards the development of an efficient universal vocoder even for unseen speakers and recording conditions. In contrast to standard WaveRNN, SC-WaveRNN exploits additional information given in the form of speaker embeddings. Using publicly-available data for training, SC-WaveRNN achieves significantly better performance over baseline WaveRNN on both subjective and objective metrics. In MOS, SC-WaveRNN achieves an improvement of about 23% for seen speaker and seen recording condition and up to 95% for unseen speaker and unseen condition. Finally, we extend our work by implementing a multi-speaker text-to-speech (TTS) synthesis similar to zero-shot speaker adaptation. In terms of performance, our system has been preferred over the baseline TTS system by 60% over 15.5% and by 60.9% over 32.6%, for seen and unseen speakers, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhijun Liu|AUTHOR Zhijun Liu]], [[Kuan Chen|AUTHOR Kuan Chen]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 240–244&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose the neural homomorphic vocoder (NHV), a source-filter model based neural vocoder framework. NHV synthesizes speech by filtering impulse trains and noise with linear time-varying (LTV) filters. A neural network controls the LTV filters by estimating complex cepstrums of time-varying impulse responses given acoustic features. The proposed framework can be trained with a combination of multi-resolution STFT loss and adversarial loss functions. Due to the use of DSP-based synthesis methods, NHV is highly efficient, fully controllable and interpretable. A vocoder was built under the framework to synthesize speech given log-Mel spectrograms and fundamental frequencies. While the model cost only 15 kFLOPs per sample, the synthesis quality remained comparable to baseline neural vocoders in both copy-synthesis and text-to-speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiao Tian|AUTHOR Qiao Tian]], [[Zewang Zhang|AUTHOR Zewang Zhang]], [[Heng Lu|AUTHOR Heng Lu]], [[Ling-Hui Chen|AUTHOR Ling-Hui Chen]], [[Shan Liu|AUTHOR Shan Liu]]
</p><p class="cpabstractcardaffiliationlist">Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 195–199&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose the FeatherWave, yet another variant of WaveRNN vocoder combining the multi-band signal processing and the linear predictive coding. The LPCNet, a recently proposed neural vocoder which utilized the linear predictive characteristic of speech signal in the WaveRNN architecture, can generate high quality speech with a speed faster than real-time on a single CPU core. However, LPCNet is still not efficient enough for online speech generation tasks. To address this issue, we adopt the multi-band linear predictive coding for WaveRNN vocoder. The multi-band method enables the model to generate several speech samples in parallel at one step. Therefore, it can significantly improve the efficiency of speech synthesis. The proposed model with 4 sub-bands needs less than 1.6 GFLOPS for speech generation. In our experiments, it can generate 24 kHz high-fidelity audio 9× faster than real-time on a single CPU, which is much faster than the LPCNet vocoder. Furthermore, our subjective listening test shows that the FeatherWave can generate speech with better quality than LPCNet.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinhyeok Yang|AUTHOR Jinhyeok Yang]]^^1^^, [[Junmo Lee|AUTHOR Junmo Lee]]^^1^^, [[Youngik Kim|AUTHOR Youngik Kim]]^^1^^, [[Hoon-Young Cho|AUTHOR Hoon-Young Cho]]^^1^^, [[Injung Kim|AUTHOR Injung Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NCSOFT, Korea; ^^2^^Handong Global University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 200–204&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel high-fidelity real-time neural vocoder called VocGAN. A recently developed GAN-based vocoder, MelGAN, produces speech waveforms in real-time. However, it often produces a waveform that is insufficient in quality or inconsistent with acoustic characteristics of the input mel spectrogram. VocGAN is nearly as fast as MelGAN, but it significantly improves the quality and consistency of the output waveform. VocGAN applies a multi-scale waveform generator and a hierarchically-nested discriminator to learn multiple levels of acoustic properties in a balanced way. It also applies the joint conditional and unconditional objective, which has shown successful results in high-resolution image synthesis. In experiments, VocGAN synthesizes speech waveforms 416.7× faster on a GTX 1080Ti GPU and 3.24× faster on a CPU than real-time. Compared with MelGAN, it also exhibits significantly improved quality in multiple evaluation metrics including mean opinion score (MOS) with minimal additional overhead. Additionally, compared with Parallel WaveGAN, another recently developed high-fidelity vocoder, VocGAN is 6.98× faster on a CPU and exhibits higher MOS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroki Kanagawa|AUTHOR Hiroki Kanagawa]], [[Yusuke Ijima|AUTHOR Yusuke Ijima]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 205–209&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a lightweight neural vocoder based on LPCNet. The recently proposed LPCNet exploits linear predictive coding to represent vocal tract characteristics, and can rapidly synthesize high-quality waveforms with fewer parameters than WaveRNN. For even greater speeds, it is necessary to reduce the time-heavy two GRUs and the DualFC. Although the original work only pruned the first GRU weight, there is room for improvements in the other GRU and DualFC. Accordingly, we use tensor decomposition to reduce these remaining parameters by more than 80%. For the proposed method we demonstrate that 1) it is 1.26 times faster on a CPU, and 2) it matched naturalness of the original LPCNet for acoustic features extracted from natural speech and for those predicted by TTS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Po-chun Hsu|AUTHOR Po-chun Hsu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 210–214&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose WG-WaveNet, a fast, lightweight, and high-quality waveform generation model. WG-WaveNet is composed of a compact flow-based model and a post-filter. The two components are jointly trained by maximizing the likelihood of the training data and optimizing loss functions on the frequency domains. As we design a flow-based model that is heavily compressed, the proposed model requires much less computational resources compared to other waveform generation models during both training and inference time; even though the model is highly compressed, the post-filter maintains the quality of generated waveform. Our PyTorch implementation can be trained using less than 8 GB GPU memory and generates audio samples at a rate of more than 960 kHz on an NVIDIA 1080Ti GPU. Furthermore, even if synthesizing on a CPU, we show that the proposed method is capable of generating 44.1 kHz speech waveform 1.2 times faster than real-time. Experiments also show that the quality of generated audio is comparable to those of other methods. Audio samples are publicly available online.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brooke Stephenson|AUTHOR Brooke Stephenson]]^^1^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^2^^, [[Laurent Girin|AUTHOR Laurent Girin]]^^1^^, [[Thomas Hueber|AUTHOR Thomas Hueber]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^GIPSA-lab (UMR 5216), France; ^^2^^LIG (UMR 5217), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 215–219&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In incremental text to speech synthesis (iTTS), the synthesizer produces an audio output before it has access to the entire input sentence. In this paper, we study the behavior of a neural sequence-to-sequence TTS system when used in an incremental mode, i.e. when generating speech output for token n, the system has access to //n+k// tokens from the text sequence. We first analyze the impact of this incremental policy on the evolution of the encoder representations of token n for different values of k (the lookahead parameter). The results show that, on average, tokens travel 88% of the way to their full context representation with a one-word lookahead and 94% after 2 words. We then investigate which text features are the most influential on the evolution towards the final representation using a random forest analysis. The results show that the most salient factors are related to token length. We finally evaluate the effects of lookahead k at the decoder level, using a MUSHRA listening test. This test shows results that contrast with the above high figures: speech synthesis quality obtained with 2 word-lookahead is significantly lower than the one obtained with the full sentence.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vadim Popov|AUTHOR Vadim Popov]], [[Stanislav Kamenev|AUTHOR Stanislav Kamenev]], [[Mikhail Kudinov|AUTHOR Mikhail Kudinov]], [[Sergey Repyevsky|AUTHOR Sergey Repyevsky]], [[Tasnima Sadekova|AUTHOR Tasnima Sadekova]], [[Vitalii Bushaev|AUTHOR Vitalii Bushaev]], [[Vladimir Kryzhanovskiy|AUTHOR Vladimir Kryzhanovskiy]], [[Denis Parkhomenko|AUTHOR Denis Parkhomenko]]
</p><p class="cpabstractcardaffiliationlist">Huawei Technologies, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 220–224&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a fast and lightweight on-device text-to-speech system based on state-of-art methods of feature and speech generation i.e. Tacotron2 and LPCNet. We show that modification of the basic pipeline combined with hardware-specific optimizations and extensive usage of parallelization enables running TTS service even on low-end devices with faster than realtime waveform generation. Moreover, the system preserves high quality of speech without noticeable degradation of Mean Opinion Score compared to the non-optimized baseline. While the system is mostly oriented on low-to-mid range hardware we believe that it can also be used in any CPU-based environment.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Song|AUTHOR Wei Song]]^^1^^, [[Guanghui Xu|AUTHOR Guanghui Xu]]^^1^^, [[Zhengchen Zhang|AUTHOR Zhengchen Zhang]]^^1^^, [[Chao Zhang|AUTHOR Chao Zhang]]^^2^^, [[Xiaodong He|AUTHOR Xiaodong He]]^^1^^, [[Bowen Zhou|AUTHOR Bowen Zhou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JD.com, China; ^^2^^JD.com, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 225–229&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural vocoder, such as WaveGlow, has become an important component in recent high-quality text-to-speech (TTS) systems. In this paper, we propose Efficient WaveGlow (EWG), a flow-based generative model serving as an efficient neural vocoder. Similar to WaveGlow, EWG has a normalizing flow backbone where each flow step consists of an affine coupling layer and an invertible 1×1 convolution. To reduce the number of model parameters and enhance the speed without sacrificing the quality of the synthesized speech, EWG improves WaveGlow in three aspects. First, the WaveNet-style transform network in WaveGlow is replaced with an FFTNet-style dilated convolution network. Next, to reduce the computation cost, group convolution is applied to both audio and local condition features. At last, the local condition is shared among the transform network layers in each coupling layer. As a result, EWG can reduce the number of floating-point operations (FLOPs) required to generate one-second audio and the number of model parameters both by more than 12 times. Experimental results show that EWG can reduce real-world inference time cost by more than twice, without any obvious reduction in the speech quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sébastien Le Maguer|AUTHOR Sébastien Le Maguer]], [[Naomi Harte|AUTHOR Naomi Harte]]
</p><p class="cpabstractcardaffiliationlist">Trinity College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 230–234&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Nowadays, synthetic speech is almost indistinguishable from human speech. The remarkable quality is mainly due to the displacing of signal processing based vocoders in favour of neural vocoders and, in particular, the WaveNet architecture. At the same time, speech synthesis evaluation is still facing difficulties in adjusting to these improvements. These difficulties are even more prevalent in the case of objective evaluation methodologies which do not correlate well with human perception. Yet, an often forgotten use of objective evaluation is to uncover prominent differences between speech signals. Such differences are crucial to decipher the improvement introduced by the use of WaveNet. Therefore, abandoning objective evaluation could be a serious mistake. In this paper, we analyze vocoded synthetic speech re-rendered using WaveNet, comparing it to standard vocoded speech. To do so, we objectively compare spectrograms and neurograms, the latter being the output of AN models. The spectrograms allow us to look at the speech production side, and the neurograms relate to the speech perception path. While we were not yet able to pinpoint how WaveNet and WORLD differ, our results suggest that the Mean-Rate (MR) neurograms in particular warrant further investigation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shota Horiguchi|AUTHOR Shota Horiguchi]]^^1^^, [[Yusuke Fujita|AUTHOR Yusuke Fujita]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Yawen Xue|AUTHOR Yawen Xue]]^^1^^, [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Hitachi, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 269–273&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speaker diarization for an unknown number of speakers is addressed in this paper. Recently proposed end-to-end speaker diarization outperformed conventional clustering-based speaker diarization, but it has one drawback: it is less flexible in terms of the number of speakers. This paper proposes a method for encoder-decoder based attractor calculation (EDA), which first generates a flexible number of attractors from a speech embedding sequence. Then, the generated multiple attractors are multiplied by the speech embedding sequence to produce the same number of speaker activities. The speech embedding sequence is extracted using the conventional self-attentive end-to-end neural speaker diarization (SA-EEND) network. In a two-speaker condition, our method achieved a 2.69% diarization error rate (DER) on simulated mixtures and a 8.07% DER on the two-speaker subset of CALLHOME, while vanilla SA-EEND attained 4.56% and 9.54%, respectively. In unknown numbers of speakers conditions, our method attained a 15.29% DER on CALLHOME, while the x-vector-based clustering method achieved a 19.43% DER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivan Medennikov|AUTHOR Ivan Medennikov]]^^1^^, [[Maxim Korenevsky|AUTHOR Maxim Korenevsky]]^^2^^, [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]]^^2^^, [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]]^^2^^, [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]]^^2^^, [[Ivan Sorokin|AUTHOR Ivan Sorokin]]^^2^^, [[Tatiana Timofeeva|AUTHOR Tatiana Timofeeva]]^^2^^, [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]]^^2^^, [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]]^^3^^, [[Ivan Podluzhny|AUTHOR Ivan Podluzhny]]^^2^^, [[Aleksandr Laptev|AUTHOR Aleksandr Laptev]]^^3^^, [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^STC-innovations, Russia; ^^3^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 274–278&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker diarization for real-life scenarios is an extremely challenging problem. Widely used clustering-based diarization approaches perform rather poorly in such conditions, mainly due to the limited ability to handle overlapping speech. We propose a novel Target-Speaker Voice Activity Detection (TS-VAD) approach, which directly predicts an activity of each speaker on each time frame. TS-VAD model takes conventional speech features (e.g., MFCC) along with i-vectors for each speaker as inputs. A set of binary classification output layers produces activities of each speaker. I-vectors can be estimated iteratively, starting with a strong clustering-based diarization.

We also extend the TS-VAD approach to the multi-microphone case using a simple attention mechanism on top of hidden representations extracted from the single-channel TS-VAD model. Moreover, post-processing strategies for the predicted speaker activity probabilities are investigated. Experiments on the CHiME-6 unsegmented data show that TS-VAD achieves state-of-the-art results, outperforming the baseline x-vector-based system by more than 30% absolute Diarization Error Rate (DER).</p></div>
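The input/output structure described above (acoustic frames plus one i-vector per target speaker in, one activity stream per speaker out) can be illustrated with the hedged sketch below; the per-speaker GRU head and all layer sizes are assumptions, not the published TS-VAD architecture.

```python
import torch
import torch.nn as nn

class TSVADSketch(nn.Module):
    """Minimal target-speaker VAD head illustrating the input/output structure.

    Acoustic frames and one i-vector per speaker go in; one activity
    probability per speaker per frame comes out.  Layer choices and sizes
    are assumptions used only to show the shapes involved.
    """
    def __init__(self, feat_dim=40, ivec_dim=100, hidden=256, n_speakers=4):
        super().__init__()
        self.n_speakers = n_speakers
        self.frame_net = nn.GRU(feat_dim + ivec_dim, hidden, batch_first=True)
        self.out = nn.Linear(hidden, 1)

    def forward(self, feats, ivectors):
        # feats: (batch, frames, feat_dim); ivectors: (batch, n_speakers, ivec_dim)
        outputs = []
        for s in range(self.n_speakers):          # one binary output stream per speaker
            ivec = ivectors[:, s:s + 1].expand(-1, feats.size(1), -1)
            h, _ = self.frame_net(torch.cat([feats, ivec], dim=-1))
            outputs.append(torch.sigmoid(self.out(h)).squeeze(-1))
        return torch.stack(outputs, dim=-1)       # (batch, frames, n_speakers)
```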
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hagai Aronowitz|AUTHOR Hagai Aronowitz]]^^1^^, [[Weizhong Zhu|AUTHOR Weizhong Zhu]]^^2^^, [[Masayuki Suzuki|AUTHOR Masayuki Suzuki]]^^3^^, [[Gakuto Kurata|AUTHOR Gakuto Kurata]]^^3^^, [[Ron Hoory|AUTHOR Ron Hoory]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, Israel; ^^2^^IBM, USA; ^^3^^IBM, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 279–283&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, speaker diarization based on speaker embeddings has shown excellent results in many works. In this paper we propose several enhancements throughout the diarization pipeline. This work addresses two clustering frameworks: agglomerative hierarchical clustering (AHC) and spectral clustering (SC).

First, we use multiple speaker embeddings. We show that fusion of x-vectors and d-vectors boosts accuracy significantly. Second, we train neural networks to leverage both acoustic and duration information for scoring similarity of segments or clusters. Third, we introduce a novel method to guide the AHC clustering mechanism using a neural network. Fourth, we handle short duration segments in SC by deemphasizing their effect on setting the number of speakers.

Finally, we propose a novel method for estimating the number of clusters in the SC framework. The method takes each eigenvalue and analyzes the projections of the SC similarity matrix on the corresponding eigenvector.

We evaluated our system on NIST SRE 2000 CALLHOME and, using cross-validation, we achieved an error rate of 5.1%, surpassing the state of the art in speaker diarization.</p></div>
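For context, the common eigengap heuristic that the proposed eigenvector-projection method refines is sketched below on a normalised graph Laplacian; this is the standard baseline, not the authors' estimator.

```python
import numpy as np

def estimate_num_speakers(similarity, max_speakers=10):
    """Standard eigengap heuristic for choosing the number of clusters in
    spectral clustering.  Assumes a symmetric, non-negative similarity matrix
    with strictly positive row sums; the refinement described in the abstract
    (analysing projections on each eigenvector) is not implemented here.
    """
    d = similarity.sum(axis=1)
    lap = np.eye(len(similarity)) - similarity / np.sqrt(np.outer(d, d))  # normalised Laplacian
    eigvals = np.sort(np.linalg.eigvalsh(lap))[:max_speakers + 1]
    gaps = np.diff(eigvals)                       # gaps between consecutive eigenvalues
    return int(np.argmax(gaps)) + 1               # largest gap marks the cluster count
```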
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qingjian Lin|AUTHOR Qingjian Lin]], [[Yu Hou|AUTHOR Yu Hou]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 284–288&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker diarization can be described as the process of extracting sequential speaker embeddings from an audio stream and clustering them according to speaker identities. Nowadays, deep neural network based approaches like x-vector have been widely adopted for speaker embedding extraction. However, in the clustering back-end, probabilistic linear discriminant analysis (PLDA) is still the dominant algorithm for similarity measurement. PLDA works in a pair-wise and independent manner, which may ignore the positional correlation of adjacent speaker embeddings. To address this issue, our previous work proposed the long short-term memory (LSTM) based scoring model, followed by the spectral clustering algorithm. In this paper, we further propose two enhanced methods based on the self-attention mechanism, which no longer focuses on the local correlation but searches for similar speaker embeddings in the whole sequence. The first approach achieves state-of-the-art performance on the DIHARD II Eval Set (18.44% DER after resegmentation), while the second one operates with higher efficiency.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jixuan Wang|AUTHOR Jixuan Wang]]^^1^^, [[Xiong Xiao|AUTHOR Xiong Xiao]]^^2^^, [[Jian Wu|AUTHOR Jian Wu]]^^2^^, [[Ranjani Ramamurthy|AUTHOR Ranjani Ramamurthy]]^^2^^, [[Frank Rudzicz|AUTHOR Frank Rudzicz]]^^1^^, [[Michael Brudno|AUTHOR Michael Brudno]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Toronto, Canada; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 289–293&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker attribution is required in many real-world applications, such as meeting transcription, where speaker identity is assigned to each utterance according to speaker voice profiles. In this paper, we propose to solve the speaker attribution problem by using graph-based semi-supervised learning methods. A graph of speech segments is built for each session, on which segments from voice profiles are represented by labeled nodes while segments from test utterances are unlabeled nodes. The weight of edges between nodes is evaluated by the similarities between the pretrained speaker embeddings of speech segments. Speaker attribution then becomes a semi-supervised learning problem on graphs, on which two graph-based methods are applied: label propagation (LP) and graph neural networks (GNNs). The proposed approaches are able to utilize the structural information of the graph to improve speaker attribution performance. Experimental results on real meeting data show that the graph based approaches reduce speaker attribution error by up to 68% compared to a baseline speaker identification approach that processes each utterance independently.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prachi Singh|AUTHOR Prachi Singh]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 294–298&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The state-of-the-art speaker diarization systems use agglomerative hierarchical clustering (AHC) which performs the clustering of previously learned neural embeddings. While the clustering approach attempts to identify speaker clusters, the AHC algorithm does not involve any further learning. In this paper, we propose a novel algorithm for hierarchical clustering which combines the speaker clustering along with a representation learning framework. The proposed approach is based on principles of self-supervised learning where the self-supervision is derived from the clustering algorithm. The representation learning network is trained with a regularized triplet loss using the clustering solution at the current step while the clustering algorithm uses the deep embeddings from the representation learning step. By combining the self-supervision based representation learning along with the clustering algorithm, we show that the proposed algorithm improves significantly (29% relative improvement) over the AHC algorithm with cosine similarity for a speaker diarization task on CALLHOME dataset. In addition, the proposed approach also improves over the state-of-the-art system with PLDA affinity matrix with 10% relative improvement in DER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joon Son Chung|AUTHOR Joon Son Chung]]^^1^^, [[Jaesung Huh|AUTHOR Jaesung Huh]]^^2^^, [[Arsha Nagrani|AUTHOR Arsha Nagrani]]^^1^^, [[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]]^^1^^, [[Andrew Zisserman|AUTHOR Andrew Zisserman]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Oxford, UK; ^^2^^University of Oxford, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 299–303&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The goal of this paper is speaker diarisation of videos collected ‘in the wild’.

We make three key contributions. First, we propose an automatic audio-visual diarisation method for YouTube videos. Our method consists of active speaker detection using audio-visual methods and speaker verification using self-enrolled speaker models. Second, we integrate our method into a semi-automatic dataset creation pipeline which significantly reduces the number of hours required to annotate videos with diarisation labels. Finally, we use this pipeline to create a large-scale diarisation dataset called VoxConverse, collected from ‘in the wild’ videos, which we will release publicly to the research community. Our dataset consists of overlapping speech, a large and diverse speaker pool, and challenging background conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 304–308&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end multi-speaker speech recognition has been a popular topic in recent years, as more and more researches focus on speech processing in more realistic scenarios. Inspired by the hearing mechanism of human beings, which enables us to concentrate on the interested speaker from the multi-speaker mixed speech by utilizing both audio and context knowledge, this paper explores the contextual information to improve the multi-talker speech recognition. In the proposed architecture, the novel embedding learning model is designed to accurately extract the contextual embedding from the multi-talker mixed speech directly. Then two advanced training strategies are further proposed to improve the new model. Experimental results show that our proposed method achieves a very large improvement on multi-speaker speech recognition, with ~25% relative WER reduction against the baseline end-to-end multi-talker ASR model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jack Deadman|AUTHOR Jack Deadman]], [[Jon Barker|AUTHOR Jon Barker]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 349–353&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Simulated data plays a crucial role in the development and evaluation of novel distant microphone ASR techniques. However, the commonly used simulated datasets adopt uninformed and potentially unrealistic speaker location distributions. We wish to generate more realistic simulations driven by recorded human behaviour. By using devices with a paired microphone array and camera, we analyse unscripted dinner party scenarios (CHiME-5) to estimate the distribution of speaker separation in a realistic setting. We deploy face-detection, and pose-detection techniques on 114 cameras to automatically locate speakers in 20 dinner party sessions. Our analysis found that on average, the separation between speakers was only 17 degrees. We use this analysis to create datasets with realistic distributions and compare it with commonly used datasets of simulated signals. By changing the position of speakers, we show that the word error rate can increase by over 73.5% relative when using a strong speech enhancement and ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhihao Du|AUTHOR Zhihao Du]]^^1^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^1^^, [[Xueliang Zhang|AUTHOR Xueliang Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harbin Institute of Technology, China; ^^2^^Inner Mongolia University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 309–313&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To improve the noise robustness of automatic speech recognition (ASR), the generative adversarial network (GAN) based enhancement methods are employed as the front-end processing, which comprise a single adversarial process of an enhancement model and a discriminator. In this single adversarial process, the discriminator is encouraged to find differences between the enhanced and clean speeches, but the distribution of clean speeches is ignored. In this paper, we propose a double adversarial network (DAN) by adding another adversarial generation process (AGP), which forces the discriminator not only to find the differences but also to model the distribution. Furthermore, a functional mean square error (f-MSE) is proposed to utilize the representations learned by the discriminator. Experimental results reveal that AGP and f-MSE are crucial for the enhancement performance on ASR task, which are missed in previous GAN-based methods. Specifically, our DAN achieves 13.00% relative word error rate improvements over the noisy speeches on the test set of CHiME-2, which outperforms several recent GAN-based enhancement methods significantly.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Antoine Bruguier|AUTHOR Antoine Bruguier]], [[Ananya Misra|AUTHOR Ananya Misra]], [[Arun Narayanan|AUTHOR Arun Narayanan]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 314–318&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Shift-invariance is a desirable property of many machine learning models. It means that delaying the input of a model in time should only result in delaying its prediction in time. A model that is shift-invariant, also eliminates undesirable side effects like frequency aliasing. When building sequence models, not only should the shift-invariance property be preserved when sampling input features, it must also be respected inside the model itself. Here, we study the impact of the commonly used stacking layer in LSTM-based ASR models and show that aliasing is likely to occur. Experimentally, by adding merely 7 parameters to an existing speech recognition model that has 120 million parameters, we are able to reduce the impact of aliasing. This acts as a regularizer that discards frequencies the model shouldn’t be relying on for predictions. Our results show that under conditions unseen at training, we are able to reduce the relative word error rate by up to 5%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Aleksandr Laptev|AUTHOR Aleksandr Laptev]], [[Ivan Medennikov|AUTHOR Ivan Medennikov]]
</p><p class="cpabstractcardaffiliationlist">ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 319–323&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While end-to-end ASR systems have proven competitive with the conventional hybrid approach, they are prone to accuracy degradation when it comes to noisy and low-resource conditions. In this paper, we argue that, even in such difficult cases, some end-to-end approaches show performance close to the hybrid baseline. To demonstrate this, we use the CHiME-6 Challenge data as an example of challenging environments and noisy conditions of everyday speech. We experimentally compare and analyze CTC-Attention versus RNN-Transducer approaches along with RNN versus Transformer architectures. We also provide a comparison of acoustic features and speech enhancements. Besides, we evaluate the effectiveness of neural network language models for hypothesis re-scoring in low-resource conditions. Our best end-to-end model based on RNN-Transducer, together with improved beam search, reaches quality by only 3.8% WER abs. worse than the LF-MMI TDNN-F CHiME-6 Challenge baseline. With the Guided Source Separation based training data augmentation, this approach outperforms the hybrid baseline system by 2.7% WER abs. and the end-to-end system best known before by 25.7% WER abs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wangyou Zhang|AUTHOR Wangyou Zhang]]^^1^^, [[Aswin Shanmugam Subramanian|AUTHOR Aswin Shanmugam Subramanian]]^^2^^, [[Xuankai Chang|AUTHOR Xuankai Chang]]^^2^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Yanmin Qian|AUTHOR Yanmin Qian]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SJTU, China; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 324–328&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite successful applications of end-to-end approaches in multi-channel speech recognition, the performance still degrades severely when the speech is corrupted by reverberation. In this paper, we integrate the dereverberation module into the end-to-end multi-channel speech recognition system and explore two different frontend architectures. First, a multi-source mask-based weighted prediction error (WPE) module is incorporated in the frontend for dereverberation. Second, another novel frontend architecture is proposed, which extends the weighted power minimization distortionless response (WPD) convolutional beamformer to perform simultaneous separation and dereverberation. We derive a new formulation from the original WPD, which can handle multi-source input, and replace eigenvalue decomposition with the matrix inverse operation to make the back-propagation algorithm more stable. The above two architectures are optimized in a fully end-to-end manner, only using the speech recognition criterion. Experiments on both spatialized wsj1-2mix corpus and REVERB show that our proposed model outperformed the conventional methods in reverberant scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinchi Qiu|AUTHOR Xinchi Qiu]]^^1^^, [[Titouan Parcollet|AUTHOR Titouan Parcollet]]^^1^^, [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]]^^2^^, [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]^^1^^, [[Mohamed Morchid|AUTHOR Mohamed Morchid]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Oxford, UK; ^^2^^Université de Montréal, Canada; ^^3^^LIA (EA 4128), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 329–333&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite the significant progress in automatic speech recognition (ASR), distant ASR remains challenging due to noise and reverberation. A common approach to mitigate this issue consists of equipping the recording devices with multiple microphones that capture the acoustic scene from different perspectives. These multi-channel audio recordings contain specific internal relations between each signal. In this paper, we propose to capture these inter- and intra- structural dependencies with quaternion neural networks, which can jointly process multiple signals as whole quaternion entities. The quaternion algebra replaces the standard dot product with the Hamilton one, thus offering a simple and elegant way to model dependencies between elements. The quaternion layers are then coupled with a recurrent neural network, which can learn long-term dependencies in the time domain. We show that a quaternion long-short term memory neural network (QLSTM), trained on the concatenated multi-channel speech signals, outperforms equivalent real-valued LSTM on two different tasks of multi-channel distant speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hangting Chen|AUTHOR Hangting Chen]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Qian Shi|AUTHOR Qian Shi]], [[Zuozhen Liu|AUTHOR Zuozhen Liu]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 334–338&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The CHiME-6 dataset presents a difficult task with extreme speech overlap, severe noise and a natural speaking style. The gap of the word error rate (WER) is distinct between the audios recorded by the distant microphone arrays and the individual headset microphones. The official baseline exhibits a WER gap of approximately 10% even though the guided source separation (GSS) has achieved considerable WER reduction. In the paper, we make an effort to integrate an improved GSS with a strong automatic speech recognition (ASR) back-end, which bridges the WER gap and achieves substantial ASR performance improvement. Specifically, the proposed GSS is initialized by masks from data-driven deep-learning models, utilizes the spectral information and conducts a selection of the input channels. Meanwhile, we propose a data augmentation technique via random channel selection and deep convolutional neural network-based multi-channel acoustic models for back-end modeling. In the experiments, our framework largely reduced the WER to 34.78%/36.85% on the CHiME-6 development/evaluation set. Moreover, a narrower gap of 0.89%/4.67% was observed between the distant and headset audios. This framework is also the foundation of the IOA’s submission to the CHiME-6 competition, which is ranked among the top systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dongmei Wang|AUTHOR Dongmei Wang]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 339–343&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a neural network based speech separation method using spatially distributed microphones. Unlike with traditional microphone array settings, neither the number of microphones nor their spatial arrangement is known in advance, which hinders the use of conventional multi-channel speech separation neural networks based on fixed size input. To overcome this, a novel network architecture is proposed that interleaves inter-channel processing layers and temporal processing layers. The inter-channel processing layers apply a self-attention mechanism along the channel dimension to exploit the information obtained with a varying number of microphones. The temporal processing layers are based on a bidirectional long short term memory (BLSTM) model and applied to each channel independently. The proposed network leverages information across time and space by stacking these two kinds of layers alternately. Our network estimates time-frequency (TF) masks for each speaker, which are then used to generate enhanced speech signals either with TF masking or beamforming. Speech recognition experimental results show that the proposed method significantly outperforms baseline multi-channel speech separation systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]
</p><p class="cpabstractcardaffiliationlist">Hitachi, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 344–348&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A novel framework for meeting transcription using asynchronous microphones is proposed in this paper. It consists of audio synchronization, speaker diarization, utterance-wise speech enhancement using guided source separation, automatic speech recognition, and duplication reduction. Doing speaker diarization before speech enhancement enables the system to deal with overlapped speech without considering sampling frequency mismatch between microphones. Evaluation on our real meeting datasets showed that our framework achieved a character error rate (CER) of 28.7% by using 11 distributed microphones, while a monaural microphone placed on the center of the table had a CER of 38.2%. We also showed that our framework achieved CER of 21.8%, which is only 2.1 percentage points higher than the CER in headset microphone-based transcription.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Catarina Botelho|AUTHOR Catarina Botelho]]^^1^^, [[Lorenz Diener|AUTHOR Lorenz Diener]]^^2^^, [[Dennis Küster|AUTHOR Dennis Küster]]^^2^^, [[Kevin Scheck|AUTHOR Kevin Scheck]]^^2^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^3^^, [[Tanja Schultz|AUTHOR Tanja Schultz]]^^2^^, [[Alberto Abad|AUTHOR Alberto Abad]]^^1^^, [[Isabel Trancoso|AUTHOR Isabel Trancoso]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^INESC-ID Lisboa, Portugal; ^^2^^Universität Bremen, Germany; ^^3^^Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 354–358&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Electromyographic (EMG) signals recorded during speech production encode information on articulatory muscle activity and also on the facial expression of emotion, thus representing a speech-related biosignal with strong potential for paralinguistic applications. In this work, we estimate the electrical activity of the muscles responsible for speech articulation directly from the speech signal. To this end, we first perform a neural conversion of speech features into electromyographic time domain features, and then attempt to retrieve the original EMG signal from the time domain features. We propose a feed forward neural network to address the first step of the problem (speech features to EMG features) and a neural network composed of a convolutional block and a bidirectional long short-term memory block to address the second problem (true EMG features to EMG signal). We observe that four out of the five originally proposed time domain features can be estimated reasonably well from the speech signal. Further, the five time domain features are able to predict the original speech-related EMG signal with a concordance correlation coefficient of 0.663. We further compare our results with the ones achieved on the inverse problem of generating acoustic speech features from EMG features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiaxuan Zhang|AUTHOR Jiaxuan Zhang]], [[Sarah Ita Levitan|AUTHOR Sarah Ita Levitan]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]
</p><p class="cpabstractcardaffiliationlist">Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 359–363&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deception detection in conversational dialogue has attracted much attention in recent years. Yet existing methods for this rely heavily on human-labeled annotations that are costly and potentially inaccurate. In this work, we present an automated system that utilizes multimodal features for conversational deception detection, without the use of human annotations. We study the predictive power of different modalities and combine them for better performance. We use openSMILE to extract acoustic features after applying noise reduction techniques to the original audio. Facial landmark features are extracted from the visual modality. We experiment with training facial expression detectors and applying Fisher Vectors to encode sequences of facial landmarks with varying length. Linguistic features are extracted from automatic transcriptions of the data. We examine the performance of these methods on the Box of Lies dataset of deception game videos, achieving 73% accuracy using features from all modalities. This result is significantly better than previous results on this corpus which relied on manual annotations, and also better than human performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zexu Pan|AUTHOR Zexu Pan]]^^1^^, [[Zhaojie Luo|AUTHOR Zhaojie Luo]]^^2^^, [[Jichen Yang|AUTHOR Jichen Yang]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^Osaka University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 364–368&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion represents an essential aspect of human speech that is manifested in speech prosody. Speech, visual, and textual cues are complementary in human communication. In this paper, we study a hybrid fusion method, referred to as multi-modal attention network (MMAN) to makes use of visual and textual cues in speech emotion recognition. We propose a novel multi-modal attention mechanism, cLSTM-MMA, which facilitates the attention across three modalities and selectively fuse the information. cLSTM-MMA is fused with other uni-modal sub-networks in the late fusion. The experiments show that speech emotion recognition benefits significantly from visual and textual cues, and the proposed cLSTM-MMA alone is as competitive as other fusion methods in terms of accuracy, but with a much more compact network structure. The proposed hybrid network MMAN achieves state-of-the-art performance on IEMOCAP database for emotion recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guang Shen|AUTHOR Guang Shen]]^^1^^, [[Riwei Lai|AUTHOR Riwei Lai]]^^1^^, [[Rui Chen|AUTHOR Rui Chen]]^^1^^, [[Yu Zhang|AUTHOR Yu Zhang]]^^2^^, [[Kejia Zhang|AUTHOR Kejia Zhang]]^^1^^, [[Qilong Han|AUTHOR Qilong Han]]^^1^^, [[Hongtao Song|AUTHOR Hongtao Song]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harbin Engineering University, China; ^^2^^SUSTech, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 369–373&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While having numerous real-world applications, speech emotion recognition is still a technically challenging problem. How to effectively leverage the inherent multiple modalities in speech data (e.g., audio and text) is key to accurate classification. Existing studies normally choose to fuse multimodal features at the utterance level and largely neglect the dynamic interplay of features from different modalities at a fine-granular level over time. In this paper, we explicitly model dynamic interactions between audio and text at the word level via interaction units between two long short-term memory networks representing audio and text. We also devise a hierarchical representation of audio information from the frame, phoneme and word levels, which largely improves the expressiveness of resulting audio features. We finally propose WISE, a novel __w__ord-level __i__nteraction-based multimodal fusion framework for __s__peech __e__motion recognition, to accommodate the aforementioned components. We evaluate WISE on the public benchmark IEMOCAP corpus and demonstrate that it outperforms state-of-the-art methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ming Chen|AUTHOR Ming Chen]]^^1^^, [[Xudong Zhao|AUTHOR Xudong Zhao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Zhejiang University, China; ^^2^^Hithink RoyalFlush Information Network, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 374–378&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition (SER) is a challenging task that requires to learn suitable features for achieving good performance. The development of deep learning techniques makes it possible to automatically extract features rather than construct hand-crafted features. In this paper, a multi-scale fusion framework named STSER is proposed for bimodal SER by using speech and text information. A smodel, which takes advantage of convolutional neural network (CNN), bi-directional long short-term memory (Bi-LSTM) and the attention mechanism, is proposed to learn speech representation from the log-mel spectrogram extracted from speech data. Specifically, the CNN layers are utilized to learn local correlations. Then the Bi-LSTM layer is applied to learn long-term dependencies and contextual information. Finally, the multi-head self-attention layer makes the model focus on the features that are most related to the emotions. A tmodel using a pre-trained ALBERT model is applied for learning text representation from text data. Finally, a multi-scale fusion strategy, including feature fusion and ensemble learning, is applied to improve the overall performance. Experiments conducted on the public emotion dataset IEMOCAP have shown that the proposed STSER can achieve comparable recognition accuracy with fewer feature inputs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pengfei Liu|AUTHOR Pengfei Liu]]^^1^^, [[Kun Li|AUTHOR Kun Li]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SpeechX, China; ^^2^^CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 379–383&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion recognition is a challenging and actively-studied research area that plays a critical role in emotion-aware human-computer interaction systems. In a multimodal setting, temporal alignment between different modalities has not been well investigated yet. This paper presents a new model named as Gated Bidirectional Alignment Network (GBAN), which consists of an attention-based bidirectional alignment network over LSTM hidden states to explicitly capture the alignment relationship between speech and text, and a novel group gated fusion (GGF) layer to integrate the representations of different modalities. We empirically show that the attention-aligned representations outperform the last-hidden-states of LSTM significantly, and the proposed GBAN model outperforms existing state-of-the-art multimodal approaches on the IEMOCAP dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aparna Khare|AUTHOR Aparna Khare]], [[Srinivas Parthasarathy|AUTHOR Srinivas Parthasarathy]], [[Shiva Sundaram|AUTHOR Shiva Sundaram]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 384–388&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>General embeddings like word2vec, GloVe and ELMo have shown a lot of success in natural language tasks. The embeddings are typically extracted from models that are built on general tasks such as skip-gram models and natural language generation. In this paper, we extend the work from natural language understanding to multi-modal architectures that use audio, visual and textual information for machine learning tasks. The embeddings in our network are extracted using the encoder of a transformer model trained using multi-task training. We use person identification and automatic speech recognition as the tasks in our embedding generation framework. We tune and evaluate the embeddings on the downstream task of emotion recognition and demonstrate that on the CMU-MOSEI dataset, the embeddings can be used to improve over previous state of the art results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeng-Lin Li|AUTHOR Jeng-Lin Li]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 389–393&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Integrating multimodal emotion sensing modules in realizing human-centered technologies is rapidly growing. Despite recent advancement of deep architectures in improving recognition performances, inability to handle individual differences in the expressive cues creates a major hurdle for real world applications. In this work, we propose a Speaker-aligned Graph Memory Network (SaGMN) that leverages the use of speaker embedding learned from a large speaker verification network to characterize such an individualized personal difference across speakers. Specifically, the learning of the gated memory block is jointly optimized with a speaker graph encoder which aligns similar vocal characteristics samples together while effectively enlarge the discrimination across emotion classes. We evaluate our multimodal emotion recognition network on the CMU-MOSEI database and achieve a state-of-art accuracy of 65.1% UAR and 74.7% F1 score. Further visualization experiments demonstrate the effect of speaker space alignment with the use of graph memory blocks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Lian|AUTHOR Zheng Lian]]^^1^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^, [[Bin Liu|AUTHOR Bin Liu]]^^1^^, [[Jian Huang|AUTHOR Jian Huang]]^^1^^, [[Zhanlei Yang|AUTHOR Zhanlei Yang]]^^2^^, [[Rongjun Li|AUTHOR Rongjun Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 394–398&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion recognition remains a complex task due to speaker variations and low-resource training samples. To address these difficulties, we focus on the domain adversarial neural networks (DANN) for emotion recognition. The primary task is to predict emotion labels. The secondary task is to learn a common representation where speaker identities can not be distinguished. By using this approach, we bring the representations of different speakers closer. Meanwhile, through using the unlabeled data in the training process, we alleviate the impact of low-resource training samples. In the meantime, prior work found that contextual information and multimodal features are important for emotion recognition. However, previous DANN based approaches ignore these information, thus limiting their performance. In this paper, we propose the context-dependent domain adversarial neural network for multimodal emotion recognition. To verify the effectiveness of our proposed method, we conduct experiments on the benchmark dataset IEMOCAP. Experimental results demonstrate that the proposed method shows an absolute improvement of 3.48% over state-of-the-art strategies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhao Ren|AUTHOR Zhao Ren]], [[Jing Han|AUTHOR Jing Han]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 496–500&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Well-designed adversarial examples can easily fool deep speech emotion recognition models into misclassifications. The transferability of adversarial attacks is a crucial evaluation indicator when generating adversarial examples to fool a new target model or multiple models. Herein, we propose a method to improve the transferability of black-box adversarial attacks using lifelong learning. First, black-box adversarial examples are generated by an atrous Convolutional Neural Network (CNN) model. This initial model is trained to attack a CNN target model. Then, we adapt the trained atrous CNN attacker to a new CNN target model using lifelong learning. We use this paradigm, as it enables multi-task sequential learning, which saves more memory space than conventional multi-task learning. We verify this property on an emotional speech database, by demonstrating that the updated atrous CNN model can attack all target models which have been learnt, and can better attack a new target model than an attack model trained on one target model only.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Han Feng|AUTHOR Han Feng]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 501–505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose speech emotion recognition (SER) combined with an acoustic-to-word automatic speech recognition (ASR) model. While acoustic prosodic features are primarily used for SER, textual features are also useful but are error-prone, especially in emotional speech. To solve this problem, we integrate ASR model and SER model in an end-to-end manner. This is done by using an acoustic-to-word model. Specifically, we utilize the states of the decoder in the ASR model with the acoustic features and input them into the SER model. On top of a recurrent network to learn features from this input, we adopt a self-attention mechanism to focus on important feature frames. Finally, we finetune the ASR model on the new dataset using a multi-task learning method to jointly optimize ASR with the SER task. Our model has achieved a 68.63% weighted accuracy (WA) and 69.67% unweighted accuracy (UA) on the IEMOCAP database, which is state-of-the-art performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Chun-Min Chang|AUTHOR Chun-Min Chang]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 506–510&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The manner that human encodes emotion information within an utterance is often complex and could result in a diverse salient acoustic profile that is conditioned on emotion types. In this work, we propose a framework in imposing a graph attention mechanism on gated recurrent unit network (GA-GRU) to improve utterance-based speech emotion recognition (SER). Our proposed GA-GRU combines both long-range time-series based modeling of speech and further integrates complex saliency using a graph structure. We evaluate our proposed GA-GRU on the IEMOCAP and the MSP-IMPROV database and achieve a 63.8% UAR and 57.47% UAR in a four class emotion recognition task. The GA-GRU obtains consistently better performances as compared to recent state-of-art in per-utterance emotion classification model, and we further observe that different emotion categories would require distinct flexible structures in modeling emotion information in the acoustic data that is beyond conventional //left-to-right// or vice versa.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adria Mallol-Ragolta|AUTHOR Adria Mallol-Ragolta]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 511–515&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the keys for supervised learning techniques to succeed resides in the access to vast amounts of labelled training data. The process of data collection, however, is expensive, time-consuming, and application dependent. In the current digital era, data can be collected continuously. This continuity renders data annotation into an endless task, which potentially, in problems such as emotion recognition, requires annotators with different cultural backgrounds. Herein, we study the impact of utilising data from different cultures in a semi-supervised learning approach to label training material for the automatic recognition of arousal and valence. Specifically, we compare the performance of culture-specific affect recognition models trained with manual or cross-cultural automatic annotations. The experiments performed in this work use the dataset released for the Cross-cultural Emotion Sub-challenge of the Audio/Visual Emotion Challenge (AVEC) 2019. The results obtained convey that the cultures used for training impact on the system performance. Furthermore, in most of the scenarios assessed, affect recognition models trained with hybrid solutions, combining manual and automatic annotations, surpass the baseline model, which was exclusively trained with manual annotations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kusha Sridhar|AUTHOR Kusha Sridhar]], [[Carlos Busso|AUTHOR Carlos Busso]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 516–520&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Reliable and generalizable //speech emotion recognition// (SER) systems have wide applications in various fields including healthcare, customer service, and security and defense. Towards this goal, this study presents a novel //teacher-student// (T-S) framework for SER, relying on an ensemble of probabilistic predictions of teacher embeddings to train an ensemble of students. We use uncertainty modeling with //Monte-Carlo// (MC) dropout to create a distribution for the embeddings of an intermediate dense layer of the teacher. The embeddings guiding the student models are derived by sampling from this distribution. The final prediction combines the results obtained by the student ensemble. The proposed model not only increases the prediction performance over the teacher model, but also generates more consistent predictions. As a T-S formulation, the approach allows the use of unlabeled data to improve the performance of the students in a semi-supervised manner. An ablation analysis shows the importance of the MC-based ensemble and the use of unlabeled data. The results show relative improvements in //concordance correlation coefficient// (CCC) up to 4.25% for arousal, 2.67% for valence and 4.98% for dominance from their baseline results. The results also show that the student ensemble decreases the uncertainty in the predictions, leading to more consistent results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siddique Latif|AUTHOR Siddique Latif]]^^1^^, [[Muhammad Asim|AUTHOR Muhammad Asim]]^^2^^, [[Rajib Rana|AUTHOR Rajib Rana]]^^1^^, [[Sara Khalifa|AUTHOR Sara Khalifa]]^^3^^, [[Raja Jurdak|AUTHOR Raja Jurdak]]^^4^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Queensland, Australia; ^^2^^Information Technology University, Pakistan; ^^3^^CSIRO, Australia; ^^4^^Queensland University of Technology, Australia; ^^5^^Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 521–525&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Generative adversarial networks (GANs) have shown potential in learning emotional attributes and generating new data samples. However, their performance is usually hindered by the unavailability of larger speech emotion recognition (SER) data. In this work, we propose a framework that utilises the mixup data augmentation scheme to augment the GAN in feature learning and generation. To show the effectiveness of the proposed framework, we present results for SER on (i) synthetic feature vectors, (ii) augmentation of the training data with synthetic features, (iii) encoded features in compressed representation. Our results show that the proposed framework can effectively learn compressed emotional representations as well as it can generate synthetic samples that help improve performance in within-corpus and cross-corpus evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vipula Dissanayake|AUTHOR Vipula Dissanayake]], [[Haimo Zhang|AUTHOR Haimo Zhang]], [[Mark Billinghurst|AUTHOR Mark Billinghurst]], [[Suranga Nanayakkara|AUTHOR Suranga Nanayakkara]]
</p><p class="cpabstractcardaffiliationlist">University of Auckland, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 526–530&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech Emotion Recognition (SER) has been a challenging task on which researchers have been working for decades. Recently, Deep Learning (DL) based approaches have been shown to perform well in SER tasks; however, it has been noticed that their superior performance is limited to the distribution of the data used to train the model. In this paper, we present an analysis of using autoencoders to improve the generalisability of DL based SER solutions. We train a sparse autoencoder using a large speech corpus extracted from social media. Later, the trained encoder part of the autoencoder is reused as the input to a long short-term memory (LSTM) network, and the encoder-LSTM modal is re-trained on an aggregation of five commonly used speech emotion corpora. Our evaluation uses an unseen corpus in the training & validation stages to simulate ‘in the wild’ condition and analyse the generalisability of our solution. A performance comparison is carried out between the encoder based model and a model trained without an encoder. Our results show that the autoencoder based model improves the unweighted accuracy of the unseen corpus by 8%, indicating autoencoder based pre-training can improve the generalisability of DL based SER solutions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 531–535&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human emotions are inherently ambiguous and impure. When designing systems to anticipate human emotions based on speech, the lack of emotional purity must be considered. However, most of the current methods for speech emotion classification rest on the consensus, e. g., one single hard label for an utterance. This labeling principle imposes challenges for system performance considering emotional impurity. In this paper, we recommend the use of emotional profiles (EPs), which provides a time series of segment-level soft labels to capture the subtle blends of emotional cues present across a specific speech utterance. We further propose the emotion profile refinery (EPR), an iterative procedure to update EPs. The EPR method produces soft, dynamically-generated, multiple probabilistic class labels during successive stages of refinement, which results in significant improvements in the model accuracy. Experiments on three well-known emotion corpora show noticeable gain using the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sung-Lin Yeh|AUTHOR Sung-Lin Yeh]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 536–540&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Developing robust speech emotion recognition (SER) systems is challenging due to small-scale of existing emotional speech datasets. However, previous works have mostly relied on handcrafted acoustic features to build SER models that are difficult to handle a wide range of acoustic variations. One way to alleviate this problem is by using speech representations learned from deep end-to-end models trained on large-scale speech database. Specifically, in this paper, we leverage an end-to-end ASR to extract ASR-based representations for speech emotion recognition. We further devise a factorized domain adaptation approach on the pre-trained ASR model to improve both the speech recognition rate and the emotion recognition accuracy on the target emotion corpus, and we also provide an analysis in the effectiveness of representations extracted from different ASR layers. Our experiments demonstrate the importance of ASR adaptation and layer depth for emotion recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shaojin Ding|AUTHOR Shaojin Ding]]^^1^^, [[Tianlong Chen|AUTHOR Tianlong Chen]]^^2^^, [[Xinyu Gong|AUTHOR Xinyu Gong]]^^1^^, [[Weiwei Zha|AUTHOR Weiwei Zha]]^^3^^, [[Zhangyang Wang|AUTHOR Zhangyang Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Texas A&M University, USA; ^^2^^University of Texas at Austin, USA; ^^3^^USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 916–920&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition systems based on Convolutional Neural Networks (CNNs) are often built with off-the-shelf backbones such as VGG-Net or ResNet. However, these backbones were originally proposed for image classification, and therefore may not be naturally fit for speaker recognition. Due to the prohibitive complexity of manually exploring the design space, we propose the first neural architecture search approach for the speaker recognition tasks, named as ''AutoSpeech''. Our algorithm first identifies the optimal operation combination in a neural cell and then derives a CNN model by stacking the neural cell for multiple times. The final speaker recognition model can be obtained by training the derived CNN model through the standard scheme. To evaluate the proposed approach, we conduct experiments on both speaker identification and speaker verification tasks using the VoxCeleb1 dataset. Results demonstrate that the derived CNN architectures from the proposed approach significantly outperform current speaker recognition systems based on VGG-M, ResNet-18, and ResNet-34 backbones, while enjoying lower model complexity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoyang Qu|AUTHOR Xiaoyang Qu]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 961–965&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art speaker verification models are based on deep learning techniques, which heavily depend on the hand-designed neural architectures from experts or engineers. We borrow the idea of //neural architecture search (NAS)// for the //text-independent speaker verification task//. As NAS can learn deep network structures automatically, we introduce the NAS conception into the well-known x-vector network. Furthermore, this paper proposes an evolutionary algorithm enhanced neural architecture search method called Auto-Vector to automatically discover promising networks for the speaker verification task. The experimental results demonstrate our NAS-based model outperforms state-of-the-art speaker verification models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ya-Qi Yu|AUTHOR Ya-Qi Yu]], [[Wu-Jun Li|AUTHOR Wu-Jun Li]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 921–925&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Time delay neural network (TDNN) has been widely used in speaker verification tasks. Recently, two TDNN-based models, including extended TDNN (E-TDNN) and factorized TDNN (F-TDNN), are proposed to improve the accuracy of vanilla TDNN. But E-TDNN and F-TDNN increase the number of parameters due to deeper networks, compared with vanilla TDNN. In this paper, we propose a novel TDNN-based model, called __d__ensely connected TDNN (D-TDNN), by adopting bottleneck layers and dense connectivity. D-TDNN has fewer parameters than existing TDNN-based models. Furthermore, we propose an improved variant of D-TDNN, called D-TDNN-SS, to employ multiple TDNN branches with short-term and long-term contexts. D-TDNN-SS can integrate the information from multiple TDNN branches with a newly designed channel-wise selection mechanism called __s__tatistics-and- __s__election (SS). Experiments on VoxCeleb datasets show that both D-TDNN and D-TDNN-SS can outperform existing models to achieve state-of-the-art accuracy with fewer parameters, and D-TDNN-SS can achieve better accuracy than D-TDNN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Yun Lei|AUTHOR Yun Lei]], [[Hongbin Suo|AUTHOR Hongbin Suo]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 926–930&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we propose an end-to-end phonetically-aware coupled network for short duration speaker verification tasks. Phonetic information is shown to be beneficial for identifying short utterances. A coupled network structure is proposed to exploit phonetic information. The coupled convolutional layers allow the network to provide frame-level supervision based on phonetic representations of the corresponding frames. The end-to-end training scheme using triplet loss function provides direct comparison of speech contents between two utterances and hence enabling phonetic-based normalization. Our systems are compared against the current mainstream speaker verification systems on both NIST SRE and VoxCeleb evaluation datasets. Relative reductions of up to 34% in equal error rate are reported.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Myunghun Jung|AUTHOR Myunghun Jung]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Jahyun Goo|AUTHOR Jahyun Goo]], [[Hoirin Kim|AUTHOR Hoirin Kim]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 931–935&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting (KWS) and speaker verification (SV) have been studied independently although it is known that acoustic and speaker domains are complementary. In this paper, we propose a multi-task network that performs KWS and SV simultaneously to fully utilize the interrelated domain information. The multi-task network tightly combines sub-networks aiming at performance improvement in challenging conditions such as noisy environments, open-vocabulary KWS, and short-duration SV, by introducing novel techniques of connectionist temporal classification (CTC)-based soft voice activity detection (VAD) and global query attention. Frame-level acoustic and speaker information is integrated with phonetically originated weights so that forms a word-level global representation. Then it is used for the aggregation of feature vectors to generate discriminative embeddings. Our proposed approach shows 4.06% and 26.71% relative improvements in equal error rate (EER) compared to the baselines for both tasks. We also present a visualization example and results of ablation experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanfeng Wu|AUTHOR Yanfeng Wu]], [[Chenkai Guo|AUTHOR Chenkai Guo]], [[Hongcan Gao|AUTHOR Hongcan Gao]], [[Xiaolei Hou|AUTHOR Xiaolei Hou]], [[Jing Xu|AUTHOR Jing Xu]]
</p><p class="cpabstractcardaffiliationlist">Nankai University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 936–940&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The pooling mechanism plays an important role in deep neural network based systems for text-independent speaker verification, which aggregates the variable-length frame-level vector sequence across all frames into a fixed-dimensional utterance-level representation. Previous attentive pooling methods employ scalar attention weights for each frame-level vector, resulting in insufficient collection of discriminative information. To address this issue, this paper proposes a vector-based attentive pooling method, which adopts vectorial attention instead of scalar attention. The vectorial attention can extract fine-grained features for discriminating different speakers. Besides, the vector-based attentive pooling is extended in a multi-head way for better speaker embeddings from multiple aspects. The proposed pooling method is evaluated with the x-vector baseline system. Experiments are conducted on two public datasets, VoxCeleb and Speaker in the Wild (SITW). The results show that the vector-based attentive pooling method achieves superior performance compared with statistics pooling and three state-of-the-art attentive pooling methods, with the best equal error rate (EER) of 2.734 and 3.062 in SITW as well as the best EER of 2.466 in VoxCeleb.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pooyan Safari|AUTHOR Pooyan Safari]], [[Miquel India|AUTHOR Miquel India]], [[Javier Hernando|AUTHOR Javier Hernando]]
</p><p class="cpabstractcardaffiliationlist">Universitat Politècnica de Catalunya, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 941–945&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The computing power of mobile devices limits the end-user applications in terms of storage size, processing, memory and energy consumption. These limitations motivate researchers for the design of more efficient deep models. On the other hand, self-attention networks based on //Transformer// architecture have attracted remarkable interests due to their high parallelization capabilities and strong performance on a variety of Natural Language Processing (NLP) applications. Inspired by the //Transformer//, we propose a tandem Self-Attention Encoding and Pooling (SAEP) mechanism to obtain a discriminative speaker embedding given non-fixed length speech utterances. SAEP is a stack of identical blocks solely relied on self-attention and position-wise feed-forward networks to create vector representation of speakers. This approach encodes short-term speaker spectral features into speaker embeddings to be used in text-independent speaker verification. We have evaluated this approach on both //VoxCeleb1 & 2// datasets. The proposed architecture is able to outperform the baseline x-vector, and shows competitive performance to some other benchmarks based on convolutions, with a significant reduction in model size. It employs 94%, 95%, and 73% less parameters compared to ResNet-34, ResNet-50, and x-vector, respectively. This indicates that the proposed fully attention based architecture is more efficient in extracting time-invariant features from speaker utterances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruiteng Zhang|AUTHOR Ruiteng Zhang]], [[Jianguo Wei|AUTHOR Jianguo Wei]], [[Wenhuan Lu|AUTHOR Wenhuan Lu]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Meng Liu|AUTHOR Meng Liu]], [[Lin Zhang|AUTHOR Lin Zhang]], [[Jiayu Jin|AUTHOR Jiayu Jin]], [[Junhai Xu|AUTHOR Junhai Xu]]
</p><p class="cpabstractcardaffiliationlist">Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 946–950&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The time-delay neural network (TDNN) is widely used in speaker verification to extract long-term temporal features of speakers. Although common TDNN approaches well capture time-sequential information, they lack the delicate transformations needed for deep representation. To solve this problem, we propose two TDNN architectures. RET integrates shortcut connections into conventional time-delay blocks, and ARET adopts a split-transform-merge strategy to extract more discriminative representation. Experiments on VoxCeleb datasets without augmentation indicate that ARET realizes satisfactory performance on the VoxCeleb1 test set, VoxCeleb1-E, and VoxCeleb1-H, with 1.389%, 1.520%, and 2.614% equal error rate (EER), respectively. Compared to state-of-the-art results on these test sets, RET achieves a 23%~43% relative reduction in EER, and ARET reaches 32%~45%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hanyi Zhang|AUTHOR Hanyi Zhang]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^2^^, [[Yunchun Zhang|AUTHOR Yunchun Zhang]]^^1^^, [[Meng Liu|AUTHOR Meng Liu]]^^2^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^3^^, [[Jianguo Wei|AUTHOR Jianguo Wei]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yunnan University, China; ^^2^^Tianjin University, China; ^^3^^NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 951–955&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks (DNN) have achieved great success in speaker recognition systems. However, it is observed that DNN based systems are easily deceived by adversarial examples leading to wrong predictions. Adversarial examples, which are generated by adding purposeful perturbations on natural examples, pose a serious security threat. In this study, we propose the adversarial separation network (//AS-Net//) to protect the speaker recognition system against adversarial attacks. Our proposed //AS-Net// is featured by its ability to separate adversarial perturbation from the test speech to restore the natural clean speech. As a standalone component, each input speech is pre-processed by //AS-Net// first. Furthermore, we incorporate the compression structure and the speaker quality loss to enhance the capacity of the //AS-Net//. Experimental results on the VCTK dataset demonstrated that the //AS-Net// effectively enhanced the robustness of speaker recognition systems against adversarial examples. It also significantly outperformed other state-of-the-art adversarial-detection mechanisms, including adversarial perturbation elimination network (APE-GAN), feature squeezing, and adversarial training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingyu Li|AUTHOR Jingyu Li]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 956–960&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel design of attention model for text-independent speaker verification. The model takes a pair of input utterances and generates an utterance-level embedding to represent speaker-specific characteristics in each utterance. The input utterances are expected to have highly similar embeddings if they are from the same speaker. The proposed attention model consists of a self-attention module and a mutual attention module, which jointly contributes to the generation of the utterance-level embedding. The self-attention weights are computed from the utterance itself while the mutual-attention weights are computed with the involvement of the other utterance in the input pairs. As a result, each utterance is represented by a self-attention weighted embedding and a mutual-attention weighted embedding. The similarity between the embeddings is measured by a cosine distance score and a binary classifier output score. The whole model, named Dual Attention Network, is trained end-to-end on Voxceleb database. The evaluation results on Voxceleb 1 test set show that the Dual Attention Network significantly outperforms the baseline systems. The best result yields an equal error rate of 1.6%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chao Weng|AUTHOR Chao Weng]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Jia Cui|AUTHOR Jia Cui]], [[Chunlei Zhang|AUTHOR Chunlei Zhang]], [[Dong Yu|AUTHOR Dong Yu]]
</p><p class="cpabstractcardaffiliationlist">Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 966–970&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose minimum Bayes risk (MBR) training of RNN-Transducer (RNN-T) for end-to-end speech recognition. Specifically, initialized with a RNN-T trained model, MBR training is conducted via minimizing the expected edit distance between the reference label sequence and on-the-fly generated N-best hypothesis. We also introduce a heuristic to incorporate an external neural network language model (NNLM) in RNN-T beam search decoding and explore MBR training with the external NNLM. Experimental results demonstrate an MBR trained model outperforms a RNN-T trained model substantially and further improvements can be achieved if trained with an external NNLM. Our best MBR trained system achieves absolute character error rate (CER) reductions of 1.2% and 0.5% on read and spontaneous Mandarin speech respectively over a strong convolution and transformer based RNN-T baseline trained on ~21,000 hours of speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chengyi Wang|AUTHOR Chengyi Wang]]^^1^^, [[Yu Wu|AUTHOR Yu Wu]]^^2^^, [[Yujiao Du|AUTHOR Yujiao Du]]^^3^^, [[Jinyu Li|AUTHOR Jinyu Li]]^^4^^, [[Shujie Liu|AUTHOR Shujie Liu]]^^2^^, [[Liang Lu|AUTHOR Liang Lu]]^^4^^, [[Shuo Ren|AUTHOR Shuo Ren]]^^2^^, [[Guoli Ye|AUTHOR Guoli Ye]]^^4^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Ming Zhou|AUTHOR Ming Zhou]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nankai University, China; ^^2^^Microsoft, China; ^^3^^BUPT, China; ^^4^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 971–975&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based encoder-decoder model has achieved impressive results for both automatic speech recognition (ASR) and text-to-speech (TTS) tasks. This approach takes advantage of the memorization capacity of neural networks to learn the mapping from the input sequence to the output sequence from scratch, without the assumption of prior knowledge such as the alignments. However, this model is prone to overfitting, especially when the amount of training data is limited. Inspired by SpecAugment and BERT, in this paper, we propose a semantic mask based regularization for training such kind of end-to-end (E2E) model. The idea is to mask the input features corresponding to a particular output token, e.g., a word or a word-piece, in order to encourage the model to fill the token based on the contextual information. While this approach is applicable to the encoder-decoder framework with any type of neural network architecture, we study the transformer-based model for ASR in this work. We perform experiments on Librispeech 960h and TedLium2 data sets, and achieve the state-of-the-art performance on the test set in the scope of E2E models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Frank Zhang|AUTHOR Frank Zhang]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Xiaohui Zhang|AUTHOR Xiaohui Zhang]], [[Chunxi Liu|AUTHOR Chunxi Liu]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 976–980&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we first show that on the widely used LibriSpeech benchmark, our transformer-based context-dependent connectionist temporal classification (CTC) system produces state-of-the-art results. We then show that using wordpieces as modeling units combined with CTC training, we can greatly simplify the engineering pipeline compared to conventional frame-based cross-entropy training by excluding all the GMM bootstrapping, decision tree building and force alignment steps, while still achieving very competitive word-error-rate. Additionally, using wordpieces as modeling units can significantly improve runtime efficiency since we can use larger stride without losing accuracy. We further confirm these findings on two internal //VideoASR// datasets: German, which is similar to English as a fusional language, and Turkish, which is an agglutinative language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Kenichi Kumatani|AUTHOR Kenichi Kumatani]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 981–985&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a novel platform for Acoustic Model training based on Federated Learning (FL) is described. This is the first attempt to introduce Federated Learning techniques in Speech Recognition (SR) tasks. Besides the novelty of the task, the paper describes an easily generalizable FL platform and presents the design decisions used for this task. Amongst the novel algorithms introduced is a hierarchical optimization scheme employing pairs of optimizers and an algorithm for gradient selection, leading to improvements in training time and SR performance. The gradient selection algorithm is based on weighting the gradients during the aggregation step. It effectively acts as a regularization process right before the gradient propagation. This process may address one of the FL challenges, i.e. training on vastly heterogeneous data. The experimental validation of the proposed system is based on the LibriSpeech task, presenting a speed-up of ×1.5 and 6% WERR. The proposed Federated Learning system appears to outperform the golden standard of distributed training in both convergence speed and overall model performance. Further improvements have been experienced in internal tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Imran Sheikh|AUTHOR Imran Sheikh]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Irina Illina|AUTHOR Irina Illina]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 986–990&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work investigates semi-supervised training of acoustic models (AM) with the lattice-free maximum mutual information (LF-MMI) objective in practically relevant scenarios with a limited amount of labeled in-domain data. An error detection driven semi-supervised AM training approach is proposed, in which an error detector controls the hypothesized transcriptions or lattices used as LF-MMI training targets on additional unlabeled data. Under this approach, our first method uses a single error-tagged hypothesis whereas our second method uses a modified supervision lattice. These methods are evaluated and compared with existing semi-supervised AM training methods in three different matched or mismatched, limited data setups. Word error recovery rates of 28 to 89% are reported.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yixin Gao|AUTHOR Yixin Gao]], [[Noah D. Stein|AUTHOR Noah D. Stein]], [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Yunliang Cai|AUTHOR Yunliang Cai]], [[Ming Sun|AUTHOR Ming Sun]], [[Tao Zhang|AUTHOR Tao Zhang]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 991–995&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Wake word (WW) spotting is challenging in far-field due to the complexities and variations in acoustic conditions and the environmental interference in signal transmission. A suite of carefully designed and optimized audio front-end (AFE) algorithms help mitigate these challenges and provide better quality audio signals to the downstream modules such as WW spotter. Since the WW model is trained with the AFE-processed audio data, its performance is sensitive to AFE variations, such as gain changes. In addition, when deploying to new devices, the WW performance is not guaranteed because the AFE is unknown to the WW model. To address these issues, we propose a novel approach to use a new feature called ΔLFBE to decouple the AFE gain variations from the WW model. We modified the neural network architectures to accommodate the delta computation, with the feature extraction module unchanged. We evaluate our WW models using data collected from real household settings and showed the models with the ΔLFBE is robust to AFE gain changes. Specifically, when AFE gain changes up to ±12dB, the baseline CNN model lost up to relative 19.0% in false alarm rate or 34.3% in false reject rate, while the model with ΔLFBE demonstrates no performance loss.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fenglin Ding|AUTHOR Fenglin Ding]], [[Wu Guo|AUTHOR Wu Guo]], [[Bin Gu|AUTHOR Bin Gu]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 996–1000&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose two novel regularization-based speaker adaptive training approaches for connectionist temporal classification (CTC) based speech recognition. The first method is center loss (CL) regularization, which is used to penalize the distances between the embeddings of different speakers and the only center. The second method is speaker variance loss (SVL) regularization in which we directly minimize the speaker interclass variance during model training. Both methods achieve the purpose of training an adaptive model on the fly by adding regularization terms to the training loss function. Our experiment on the AISHELL-1 Mandarin recognition task shows that both methods are effective at adapting the CTC model without requiring any specific fine-tuning or additional complexity, achieving character error rate improvements of up to 8.1% and 8.6% over the speaker independent (SI) model, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1001–1005&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate the robustness and training dynamics of raw waveform acoustic models for automatic speech recognition (ASR). It is known that the first layer of such models learn a set of filters, performing a form of time-frequency analysis. This layer is liable to be under-trained owing to gradient vanishing, which can negatively affect the network performance. Through a set of experiments on TIMIT, Aurora-4 and WSJ datasets, we investigate the training dynamics of the first layer by measuring the evolution of its average frequency response over different epochs. We demonstrate that the network efficiently learns an optimal set of filters with a high spectral resolution and the dynamics of the first layer highly correlates with the dynamics of the cross entropy (CE) loss and word error rate (WER). In addition, we study the robustness of raw waveform models in both matched and mismatched conditions. The accuracy of these models is found to be comparable to, or better than, their MFCC-based counterparts in matched conditions and notably improved by using a better alignment. The role of raw waveform normalisation was also examined and up to 4.3% absolute WER reduction in mismatched conditions was achieved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiantong Xu|AUTHOR Qiantong Xu]], [[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Jacob Kahn|AUTHOR Jacob Kahn]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1006–1010&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pseudo-labeling has recently shown promise in end-to-end automatic speech recognition (ASR). We study Iterative Pseudo-Labeling (IPL), a semi-supervised algorithm which efficiently performs multiple iterations of pseudo-labeling on unlabeled data as the acoustic model evolves. In particular, IPL fine tunes an existing model at each iteration using both labeled data and a subset of unlabeled data. We study the main components of IPL: decoding with a language model and data augmentation. We then demonstrate the effectiveness of IPL by achieving state-of-the-art word-error rate on the LIBRISPEECH test sets in both standard and low-resource setting. We also study the effect of language models trained on different corpora to show IPL can effectively utilize additional text. Finally, we release a new large in-domain text corpus which does not overlap with the LIBRISPEECH training transcriptions to foster research in low-resource, semi-supervised ASR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoko Kawamura|AUTHOR Naoko Kawamura]]^^1^^, [[Tatsuya Kitamura|AUTHOR Tatsuya Kitamura]]^^2^^, [[Kenta Hamada|AUTHOR Kenta Hamada]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Himeji Dokkyo University, Japan; ^^2^^Konan University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1011–1012&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Tube phonation, or straw phonation, is a frequently used vocal training technique to improve the efficiency of the vocal mechanism by repeatedly producing a speech sound into a tube or straw. Use of the straw results in a semi-occluded vocal tract in order to maximize the interaction between the vocal fold vibration and the vocal tract. This method requires a voice trainer or therapist to raise the trainee or patient’s awareness of the vibrations around his or her mouth, guiding him/her to maximize the vibrations, which results in efficient phonation. A major problem with this process is that the trainer cannot monitor the trainee/patient’s vibratory state in a quantitative manner. This study proposes the use of Smart Tube, a straw with an attached acceleration sensor and LED strip that can measure vibrations and provide corresponding feedback through LED lights in real-time. The biofeedback system was implemented using a microcontroller board, Arduino Uno, to minimize cost. Possible system function enhancements include Bluetooth compatibility with personal computers and/or smartphones. Smart Tube can facilitate improved phonation for trainees/patients by providing quantitative visual feedback.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seong Choi|AUTHOR Seong Choi]]^^1^^, [[Seunghoon Jeong|AUTHOR Seunghoon Jeong]]^^2^^, [[Jeewoo Yoon|AUTHOR Jeewoo Yoon]]^^1^^, [[Migyeong Yang|AUTHOR Migyeong Yang]]^^1^^, [[Minsam Ko|AUTHOR Minsam Ko]]^^2^^, [[Eunil Park|AUTHOR Eunil Park]]^^1^^, [[Jinyoung Han|AUTHOR Jinyoung Han]]^^1^^, [[Munyoung Lee|AUTHOR Munyoung Lee]]^^3^^, [[Seonghee Lee|AUTHOR Seonghee Lee]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Sungkyunkwan University, Korea; ^^2^^Hanyang University, Korea; ^^3^^ETRI, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1013–1014&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce an open-source Python library, VCTUBE, which can automatically generate <audio, text> pair of speech data from a given Youtube URL. We believe VCTUBE is useful for collecting, processing, and annotating speech data easily toward developing speech synthesis systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanlu Xie|AUTHOR Yanlu Xie]]^^1^^, [[Xiaoli Feng|AUTHOR Xiaoli Feng]]^^1^^, [[Boxue Li|AUTHOR Boxue Li]]^^2^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^1^^, [[Yujia Jin|AUTHOR Yujia Jin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BLCU, China; ^^2^^Yunfan Hailiang Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1015–1016&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, an APP with Mispronunciation Detection and Feedback for Mandarin L2 Learners is shown. The APP could detect the mispronunciation in the words and highlight it with red at the phone level. Also, the score will be shown to evaluate the overall pronunciation. When touching the highlight, the pronunciation of the learner’s and the standard’s is played. Then the flash animation that describes the movement of the tongue, mouth, and other articulators will be shown to the learner. The learner could repeat the process to improve and excise the pronunciation. The App called ‘SAIT Hànyǔ’ can be downloaded at App Store.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tejas Udayakumar|AUTHOR Tejas Udayakumar]], [[Kinnera Saranu|AUTHOR Kinnera Saranu]], [[Mayuresh Sanjay Oak|AUTHOR Mayuresh Sanjay Oak]], [[Ajit Ashok Saunshikar|AUTHOR Ajit Ashok Saunshikar]], [[Sandip Shriram Bapat|AUTHOR Sandip Shriram Bapat]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1017–1018&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a generation where industries are going through a paradigm shift because of the rampant growth of deep learning, structured data plays a crucial role in the automation of various tasks. Textual structured data is one such kind which is extensively used in systems like chat bots and automatic speech recognition. Unfortunately, a majority of these textual data available is unstructured in the form of user reviews and feedback, social media posts etc. Automating the task of categorizing or clustering these data into meaningful domains will reduce the time and effort needed in building sophisticated human-interactive systems. In this paper, we present a web tool that builds a domain specific data based on a search phrase from a database of highly unstructured user utterances. We also show the usage of Elasticsearch database with custom indexes for full correlated text-search. This tool uses the open sourced Glove model combined with cosine similarity and performs a graph based search to provide semantically and syntactically meaningful corpora. In the end, we discuss its applications with respect to natural language processing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ke Shi|AUTHOR Ke Shi]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Richeng Duan|AUTHOR Richeng Duan]], [[Siti Umairah Md. Salleh|AUTHOR Siti Umairah Md. Salleh]], [[Nur Farah Ain Suhaimi|AUTHOR Nur Farah Ain Suhaimi]], [[Rajan Vellu|AUTHOR Rajan Vellu]], [[Ngoc Thuy Huong Helen Thai|AUTHOR Ngoc Thuy Huong Helen Thai]], [[Nancy F. Chen|AUTHOR Nancy F. Chen]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1019–1020&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a computer-assisted language learning system that automatically evaluates the pronunciation and fluency of spoken Malay and Tamil. Our system consists of a server and a user-facing Android application, where the server is responsible for speech-to-text alignment as well as pronunciation and fluency scoring. We describe our system architecture and discuss the technical challenges associated with low resource languages. To the best of our knowledge, this work is the first pronunciation and fluency scoring system for Malay and Tamil.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takaaki Saeki|AUTHOR Takaaki Saeki]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1021–1022&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a real-time, full-band, online voice conversion (VC) system that uses a single CPU. For practical applications, VC must be high quality and able to perform real-time, online conversion with fewer computational resources. Our system achieves this by combining non-linear conversion with a deep neural network and short-tap, sub-band filtering. We evaluate our system and demonstrate that it 1) achieves the estimated complexity around 2.5 GFLOPS and measures real-time factor (RTF) around 0.5 with a single CPU and 2) can attain converted speech with a 3.4 / 5.0 mean opinion score (MOS) of naturalness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoli Feng|AUTHOR Xiaoli Feng]]^^1^^, [[Yanlu Xie|AUTHOR Yanlu Xie]]^^1^^, [[Yayue Deng|AUTHOR Yayue Deng]]^^1^^, [[Boxue Li|AUTHOR Boxue Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BLCU, China; ^^2^^Yunfan Hailiang Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1023–1024&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a dynamic three dimensional (3D) head model is introduced which is built based on knowledge of (the human) anatomy and the theory of distinctive features. The model is used to help Chinese learners understand the exact location and method of the phoneme articulation intuitively. You can access the phonetic learning system, choose the target sound you want to learn and then watch the 3D dynamic animations of the phonemes. You can look at the lips, tongue, soft palate, uvula, and other dynamic vocal organs as well as teeth, gums, hard jaw, and other passive vocal organs from different angles. In this process, you can make the skin and some of the muscles semi-transparent, or zoom in or out the model to see the dynamic changes of articulators clearly. By looking at the 3D model, learners can find the exact location of each sound and imitate the pronunciation actions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoki Kimura|AUTHOR Naoki Kimura]], [[Zixiong Su|AUTHOR Zixiong Su]], [[Takaaki Saeki|AUTHOR Takaaki Saeki]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1025–1026&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work is the first attempt to apply an end-to-end, deep neural network-based automatic speech recognition (ASR) pipeline to the Silent Speech Challenge dataset (SSC), which contains synchronized ultrasound images and lip images captured when a single speaker read the TIMIT corpus without uttering audible sounds. In silent speech research using SSC dataset, established methods in ASR have been utilized with some modifications to use it in visual speech recognition. In this work, we tested the SOTA method of ASR on the SSC dataset using the End-to-End Speech Processing Toolkit, ESPnet. The experimental results show that this end-to-end method achieved a character error rate (CER) of 10.1% and a WER of 20.5% by incorporating SpecAugment, demonstrating the possibility to further improve the performance with additional data collection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Emilian Stoimenov|AUTHOR Emilian Stoimenov]], [[Hosam Khalil|AUTHOR Hosam Khalil]], [[Jian Wu|AUTHOR Jian Wu]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 541–545&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we layout a Fast & Slow (F&S) acoustic model (AM) in an encoder-decoder architecture for streaming automatic speech recognition (ASR). The Slow model represents our baseline ASR model; it’s significantly larger than Fast model and provides stronger accuracy. The Fast model is generally developed for related speech applications. It has weaker ASR accuracy but is faster to evaluate and consequently leads to better user-perceived latency. We propose a joint F&S model that encodes output state information from Fast model, feeds that to Slow model to improve overall model accuracy from F&S AM. We demonstrate scenarios where individual Fast and Slow models are already available to build the joint F&S model. We apply our work on a large vocabulary ASR task. Compared to Slow AM, our Fast AM is 3–4× smaller and 11.5% relatively weaker in ASR accuracy. The proposed F&S AM achieves 4.7% relative gain over the Slow AM. We also report a progression of techniques and improve the relative gain to 8.1% by encoding additional Fast AM outputs. Our proposed framework has generic attributes — we demonstrate a specific extension by encoding two Slow models to achieve 12.2% relative gain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Shigeki Karita|AUTHOR Shigeki Karita]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Takanori Ashihara|AUTHOR Takanori Ashihara]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Marc Delcroix|AUTHOR Marc Delcroix]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 546–550&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel training approach for encoder-decoder-based sequence-to-sequence (S2S) models. S2S models have been used successfully by the automatic speech recognition (ASR) community. The important key factor of S2S is the attention mechanism as it captures the relationships between input and output sequences. The attention weights inform which time frames should be attended to for predicting the output labels. In previous work, we proposed distilling S2S knowledge into connectionist temporal classification (CTC) based models by using the attention characteristics to create pseudo-targets for an auxiliary cross-entropy loss term. This approach can significantly improve CTC models. However, it remained unclear whether our proposal could be used to improve S2S models. In this paper, we extend our previous work to create a strong S2S model, i.e. Transformer with CTC (CTC-Transformer). We utilize Transformer outputs and the source attention weights for making pseudo-targets that contain both the posterior and the timing information of each Transformer output. These pseudo-targets are used to train the shared encoder of the CTC-Transformer through the use of direct feedback from the Transformer-decoder and thus obtain more informative representations. Experiments on public and private datasets to perform various tasks demonstrate that our proposal is also effective for enhancing S2S model training. In particular, on a Japanese ASR task, our best system outperforms the previous state-of-the-art alternative.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zoltán Tüske|AUTHOR Zoltán Tüske]]^^1^^, [[George Saon|AUTHOR George Saon]]^^1^^, [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]^^2^^, [[Brian Kingsbury|AUTHOR Brian Kingsbury]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, USA; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 551–555&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It is generally believed that direct sequence-to-sequence (seq2seq) speech recognition models are competitive with hybrid models only when a large amount of data, at least a thousand hours, is available for training. In this paper, we show that state-of-the-art recognition performance can be achieved on the Switchboard-300 database using a single headed attention, LSTM based model. Using a cross-utterance language model, our single-pass speaker independent system reaches 6.4% and 12.5% word error rate (WER) on the Switchboard and CallHome subsets of Hub5’00, without a pronunciation lexicon. While careful regularization and data augmentation are crucial in achieving this level of performance, experiments on Switchboard-2000 show that nothing is more useful than more data. Overall, the combination of various regularizations and a simple but fairly large model results in a new state of the art, 4.8% and 8.3% WER on the Switchboard and CallHome sets, using SWB-2000 without any external data resources.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhehuai Chen|AUTHOR Zhehuai Chen]]^^1^^, [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]]^^1^^, [[Yu Zhang|AUTHOR Yu Zhang]]^^1^^, [[Gary Wang|AUTHOR Gary Wang]]^^2^^, [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]]^^1^^, [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Simon Fraser University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 556–560&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Text-to-Speech synthesis (TTS) based data augmentation is a relatively new mechanism for utilizing text-only data to improve automatic speech recognition (ASR) training without parameter or inference architecture changes. However, efforts to train speech recognition systems on synthesized utterances suffer from limited acoustic diversity of TTS outputs. Additionally, the text-only corpus is always much larger than the transcribed speech corpus by several orders of magnitude, which makes speech synthesis of all the text data impractical. In this work, we propose to combine generative adversarial network (GAN) and multi-style training (MTR) to increase acoustic diversity in the synthesized data. We also present a contrastive language model-based data selection technique to improve the efficiency of learning from unspoken text. We demonstrate that our proposed method allows ASR models to learn from synthesis of large-scale unspoken text sources and achieves a 35% relative WER reduction on a voice-search task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiwen Shao|AUTHOR Yiwen Shao]]^^1^^, [[Yiming Wang|AUTHOR Yiming Wang]]^^1^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^2^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 561–565&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present PYCHAIN, a fully parallelized PyTorch implementation of end-to-end lattice-free maximum mutual information (LF-MMI) training for the so-called //chain models// in the Kaldi automatic speech recognition (ASR) toolkit. Unlike other PyTorch and Kaldi based ASR toolkits, PYCHAIN is designed to be as flexible and light-weight as possible so that it can be easily plugged into new ASR projects, or other existing PyTorch-based ASR tools, as exemplified respectively by a new project PYCHAIN-EXAMPLE, and ESPRESSO, an existing end-to-end ASR toolkit. PYCHAIN’s efficiency and flexibility is demonstrated through such novel features as full GPU training on numerator/denominator graphs, and support for unequal length sequences. Experiments on the WSJ dataset show that with simple neural networks and commonly used machine learning techniques, PYCHAIN can achieve competitive results that are comparable to Kaldi and better than other end-to-end ASR systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Keyu An|AUTHOR Keyu An]], [[Hongyu Xiang|AUTHOR Hongyu Xiang]], [[Zhijian Ou|AUTHOR Zhijian Ou]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 566–570&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a new open source toolkit for speech recognition, named CAT (__C__TC-CRF based __A__SR __T__oolkit). CAT inherits the data-efficiency of the hybrid approach and the simplicity of the E2E approach, providing a full-fledged implementation of CTC-CRFs and complete training and testing scripts for a number of English and Chinese benchmarks. Experiments show CAT obtains state-of-the-art results, which are comparable to the fine-tuned hybrid models in Kaldi but with a much simpler training pipeline. Compared to existing non-modularized E2E models, CAT performs better on limited-scale datasets, demonstrating its data efficiency. Furthermore, we propose a new method called contextualized soft forgetting, which enables CAT to do streaming ASR without accuracy degradation. We hope CAT, especially the CTC-CRF based framework and software, will be of broad interest to the community, and can be further explored and improved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 571–575&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Monotonic chunkwise attention (MoChA) has been studied for the online streaming automatic speech recognition (ASR) based on a sequence-to-sequence framework. In contrast to connectionist temporal classification (CTC), backward probabilities cannot be leveraged in the alignment marginalization process during training due to left-to-right dependency in the decoder. This results in the error propagation of alignments to subsequent token generation. To address this problem, we propose CTC-synchronous training (CTC-ST), in which MoChA uses CTC alignments to learn optimal monotonic alignments. Reference CTC alignments are extracted from a CTC branch sharing the same encoder with the decoder. The entire model is jointly optimized so that the expected boundaries from MoChA are synchronized with the alignments. Experimental evaluations of the TEDLIUM release-2 and Librispeech corpora show that the proposed method significantly improves recognition, especially for long utterances. We also show that CTC-ST can bring out the full potential of SpecAugment for MoChA.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brady Houston|AUTHOR Brady Houston]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 576–580&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Using data from multiple dialects has shown promise in improving neural network acoustic models. While such training can improve the performance of an acoustic model on a single dialect, it can also produce a model capable of good performance on multiple dialects. However, training an acoustic model on pooled data from multiple dialects takes a significant amount of time and computing resources, and it needs to be retrained every time a new dialect is added to the model. In contrast, sequential transfer learning (fine-tuning) does not require retraining using all data, but may result in catastrophic forgetting of previously-seen dialects. Using data from four english dialects, we demonstrate that by using loss functions that mitigate catastrophic forgetting, sequential transfer learning can be used to train multi-dialect acoustic models that narrow the WER gap between the best (combined training) and worst (fine-tuning) case by up to 65%. Continual learning shows great promise in minimizing training time while approaching the performance of models that require much more training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xingchen Song|AUTHOR Xingchen Song]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Yiheng Huang|AUTHOR Yiheng Huang]]^^2^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 581–585&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, End-to-End (E2E) models have achieved state-of-the-art performance for automatic speech recognition (ASR). Within these large and deep models, overfitting remains an important problem that heavily influences the model performance. One solution to deal with the overfitting problem is to increase the quantity and variety of the training data with the help of data augmentation. In this paper, we present SpecSwap, a simple data augmentation scheme for automatic speech recognition that acts directly on the spectrogram of input utterances. The augmentation policy consists of swapping blocks of frequency channels and swapping blocks of time steps. We apply SpecSwap on Transformer-based networks for end-to-end speech recognition task. Our experiments on Aishell-1 show state-of-the-art performance for E2E models that are trained solely on the speech training data. Further, by increasing the depth of model, the Transformers trained with augmentations can outperform certain hybrid systems, even without the aid of a language model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adriana Stan|AUTHOR Adriana Stan]]
</p><p class="cpabstractcardaffiliationlist">Technical University of Cluj-Napoca, Romania</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 586–590&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep learning enables the development of efficient end-to-end speech processing applications while bypassing the need for expert linguistic and signal processing features. Yet, recent studies show that good quality speech resources and phonetic transcription of the training data can enhance the results of these applications. In this paper, the RECOApy tool is introduced. RECOApy streamlines the steps of data recording and pre-processing required in end-to-end speech-based applications. The tool implements an easy-to-use interface for prompted speech recording, spectrogram and waveform analysis, utterance-level normalisation and silence trimming, as well grapheme-to-phoneme conversion of the prompts in eight languages: Czech, English, French, German, Italian, Polish, Romanian and Spanish.

The grapheme-to-phoneme (G2P) converters are deep neural network (DNN) based architectures trained on lexicons extracted from the Wiktionary online collaborative resource. Given the differing degrees of orthographic transparency, as well as the varying number of phonetic entries across the languages, the DNNs’ hyperparameters are optimised with an evolution strategy. The phoneme and word error rates of the resulting G2P converters are presented and discussed. The tool, the processed phonetic lexicons and trained G2P models are made freely available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuan Shangguan|AUTHOR Yuan Shangguan]], [[Kate Knister|AUTHOR Kate Knister]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Ian McGraw|AUTHOR Ian McGraw]], [[Françoise Beaufays|AUTHOR Françoise Beaufays]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 591–595&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The demand for fast and accurate incremental speech recognition increases as the applications of automatic speech recognition (ASR) proliferate. Incremental speech recognizers output chunks of partially recognized words while the user is still talking. Partial results can be revised before the ASR finalizes its hypothesis, causing instability issues. We analyze the quality and stability of on-device streaming end-to-end (E2E) ASR models. We first introduce a novel set of metrics that quantify the instability at word and segment levels. We study the impact of several model training techniques that improve E2E model qualities but degrade model stability. We categorize the causes of instability and explore various solutions to mitigate them in a streaming E2E ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhe Liu|AUTHOR Zhe Liu]], [[Fuchun Peng|AUTHOR Fuchun Peng]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 596–600&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2020/MEDIA/1338" class="externallinkbutton" target="_blank">{{$:/causal/ZIP Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>A common question being raised in automatic speech recognition (ASR) evaluations is how reliable is an observed word error rate (WER) improvement comparing two ASR systems, where statistical hypothesis testing and confidence interval (CI) can be utilized to tell whether this improvement is real or only due to random chance. The bootstrap resampling method has been popular for such significance analysis which is intuitive and easy to use. However, this method fails in dealing with dependent data, which is prevalent in speech world — for example, ASR performance on utterances from the same speaker could be correlated. In this paper we present blockwise bootstrap approach — by dividing evaluation utterances into nonoverlapping blocks, this method resamples these blocks instead of original data. We show that the resulting variance estimator of absolute WER difference between two ASR systems is consistent under mild conditions. We also demonstrate the validity of blockwise bootstrap method on both synthetic and real-world speech data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anil Ramakrishna|AUTHOR Anil Ramakrishna]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 601–605&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Psycholinguistic normatives represent various affective and mental constructs using numeric scores and are used in a variety of applications in natural language processing. They are commonly used at the sentence level, the scores of which are estimated by extrapolating word level scores using simple aggregation strategies, which may not always be optimal. In this work, we present a novel approach to estimate the psycholinguistic norms at sentence level. We apply a multidimensional annotation fusion model on annotations at the word level to estimate a parameter which captures relationships between different norms. We then use this parameter at sentence level to estimate the norms. We evaluate our approach by predicting sentence level scores for various normative dimensions and compare with standard word aggregation schemes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kai Fan|AUTHOR Kai Fan]]^^1^^, [[Bo Li|AUTHOR Bo Li]]^^2^^, [[Jiayi Wang|AUTHOR Jiayi Wang]]^^2^^, [[Shiliang Zhang|AUTHOR Shiliang Zhang]]^^2^^, [[Boxing Chen|AUTHOR Boxing Chen]]^^2^^, [[Niyu Ge|AUTHOR Niyu Ge]]^^1^^, [[Zhijie Yan|AUTHOR Zhijie Yan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, USA; ^^2^^Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 606–610&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performances of automatic speech recognition (ASR) systems are usually evaluated by the metric word error rate (WER) when the manually transcribed data are provided, which are, however, expensively available in the real scenario. In addition, the empirical distribution of WER for most ASR systems usually tends to put a significant mass near zero, making it difficult to simulate with a single continuous distribution. In order to address the two issues of ASR quality estimation (QE), we propose a novel neural zero-inflated model to predict the WER of the ASR result without transcripts. We design a neural zero-inflated beta regression on top of a bidirectional transformer language model conditional on speech features (speech-BERT). We adopt the pre-training strategy of token level masked language modeling for speech-BERT as well, and further fine-tune with our zero-inflated layer for the mixture of discrete and continuous outputs. The experimental results show that our approach achieves better performance on WER prediction compared with strong baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alejandro Woodward|AUTHOR Alejandro Woodward]], [[Clara Bonnín|AUTHOR Clara Bonnín]], [[Issey Masuda|AUTHOR Issey Masuda]], [[David Varas|AUTHOR David Varas]], [[Elisenda Bou-Balust|AUTHOR Elisenda Bou-Balust]], [[Juan Carlos Riveiro|AUTHOR Juan Carlos Riveiro]]
</p><p class="cpabstractcardaffiliationlist">Vilynx, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 611–615&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent improvements in Automatic Speech Recognition (ASR) systems have enabled the growth of myriad applications such as voice assistants, intent detection, keyword extraction and sentiment analysis. These applications, which are now widely used in the industry, are very sensitive to the errors generated by ASR systems. This could be overcome by having a reliable confidence measurement associated to the predicted output. This work presents a novel method which uses internal neural features of a frozen ASR model to train an independent neural network to predict a softmax temperature value. This value is computed in each decoder time step and multiplied by the logits in order to redistribute the output probabilities. The resulting softmax values corresponding to predicted tokens constitute a more reliable confidence measure. Moreover, this work also studies the effect of teacher forcing on the training of the proposed temperature prediction module. The output confidence estimation shows an improvement of -25.78% in EER and +7.59% in AUC-ROC with respect to the unaltered softmax values of the predicted tokens, evaluated on a proprietary dataset consisting on News and Entertainment videos.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ahmed Ali|AUTHOR Ahmed Ali]]^^1^^, [[Steve Renals|AUTHOR Steve Renals]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HBKU, Qatar; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 616–620&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Measuring the performance of automatic speech recognition (ASR) systems requires manually transcribed data in order to compute the word error rate (WER), which is often time-consuming and expensive. In this paper, we continue our effort in estimating WER using acoustic, lexical and phonotactic features. Our novel approach to estimate the WER uses a multistream end-to-end architecture. We report results for systems using internal speech decoder features (glass-box), systems without speech decoder features (black-box), and for systems without having access to the ASR system (no-box). The no-box system learns joint acoustic-lexical representation from phoneme recognition results along with MFCC acoustic features to estimate WER. Considering WER per sentence, our no-box system achieves 0.56 Pearson correlation with the reference evaluation and 0.24 root mean square error (RMSE) across 1,400 sentences. The estimated overall WER by e-WER2 is 30.9% for a three hours test set, while the WER computed using the reference transcriptions was 28.5%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]], [[Petra Wagner|AUTHOR Petra Wagner]]
</p><p class="cpabstractcardaffiliationlist">Universität Bielefeld, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 621–625&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With laughter research seeing a development in recent years, there is also an increased need in materials having laughter annotations. We examine in this study how one can leverage existing spontaneous speech resources to this goal. We first analyze the process of manual laughter annotation in corpora, by establishing two important parameters of the process: the amount of time required and its inter-rater reliability. Next, we propose a novel semi-automatic tool for laughter annotation, based on a signal-based representation of speech rhythm. We test both annotation approaches on the same recordings, containing German dyadic spontaneous interactions, and employing a larger pool of annotators than previously done. We then compare and discuss the obtained results based on the two aforementioned parameters, highlighting the benefits and costs associated to each approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joshua L. Martin|AUTHOR Joshua L. Martin]], [[Kevin Tang|AUTHOR Kevin Tang]]
</p><p class="cpabstractcardaffiliationlist">University of Florida, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 626–630&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent research has highlighted that state-of-the-art automatic speech recognition (ASR) systems exhibit a bias against African American speakers. In this research, we investigate the underlying causes of this racially based disparity in performance, focusing on a unique morpho-syntactic feature of African American English (AAE), namely habitual “be”, an invariant form of “be” that encodes the habitual aspect. By looking at over 100 hours of spoken AAE, we evaluated two ASR systems — DeepSpeech and Google Cloud Speech — to examine how well habitual “be” and its surrounding contexts are inferred. While controlling for local language and acoustic factors such as the amount of context, noise, and speech rate, we found that habitual “be” and its surrounding words were more error prone than non-habitual “be” and its surrounding words. These findings hold both when the utterance containing “be” is processed in isolation and in conjunction with surrounding utterances within speaker turn. Our research highlights the need for equitable ASR systems to take into account dialectal differences beyond acoustic modeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Georgia Zellou|AUTHOR Georgia Zellou]]^^1^^, [[Rebecca Scarborough|AUTHOR Rebecca Scarborough]]^^2^^, [[Renee Kemp|AUTHOR Renee Kemp]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at Davis, USA; ^^2^^University of Colorado Boulder, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 631–635&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A production study explored the acoustic characteristics of /æ/ in CVC and CVN words spoken by California speakers who raise /æ/ in pre-nasal contexts. Results reveal that the phonetic realization of the /æ/-/ε/ contrast in these contexts is multidimensional. Raised pre-nasal /æ/ is close in formant space to /ε/, particularly over the second half of the vowel. Yet, systematic differences in the realization of the secondary acoustic features of duration, formant movement, and degree of coarticulatory vowel nasalization keep these vowels phonetically distinct. These findings have implications for systems of vowel contrast and the use of secondary phonetic properties to maintain lexical distinctions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Louis-Marie Lorin|AUTHOR Louis-Marie Lorin]]^^1^^, [[Lorenzo Maselli|AUTHOR Lorenzo Maselli]]^^2^^, [[Léo Varnet|AUTHOR Léo Varnet]]^^3^^, [[Maria Giavazzi|AUTHOR Maria Giavazzi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ENS, France; ^^2^^Scuola Normale Superiore, Italy; ^^3^^LSP (UMR 8248), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 636–640&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Languages tend to license segmental contrasts where they are maximally perceptible, i.e. where more perceptual cues to the contrast are available. For strident fricatives, the most salient cues to the presence of voicing are low-frequency energy concentrations and fricative duration, as voiced fricatives are systematically shorter than voiceless ones. Cross-linguistically, the voicing contrast is more frequently realized word-initially than word-finally, as for obstruents. We investigate the phonetic underpinnings of this asymmetric behavior at the word edges, focusing on the availability of durational cues to the contrast in the two positions. To assess segmental duration, listeners rely on temporal markers, i.e. jumps in acoustic energy which demarcate segmental boundaries, thereby facilitating duration discrimination. We conducted an acoustic analysis of word-initial and word-final strident fricatives in American English. We found that temporal markers are sharper at the left edge of word-initial fricatives than at the right edge of word-final fricatives, in terms of absolute value of the intensity slope, in the high-frequency region. These findings allow us to make predictions about the availability of durational cues to the voicing contrast in the two positions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingqiong Luo|AUTHOR Mingqiong Luo]]
</p><p class="cpabstractcardaffiliationlist">SISU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 641–645&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It is well known that in Mandarin Chinese (MC) nasal rhymes, non-high vowels /a/ and /e/ undergo Vowel Nasalization and Backness Feature Specification processes to harmonize with the nasal coda in both manner and place of articulation. Specifically, the vowel is specified with the [+front] feature when followed by the /n/ coda and the [+back] feature when followed by /ŋ/. On the other hand, phonetic experiments in recent researches have shown that in MC disyllabic words, the nasal coda tends to undergo place assimilation in the V,,1,,N.C,,2,,V,,2,, context and complete deletion in the V,,1,,N.V,,2,, context.

These processes raise two questions: firstly, will V,,1,, in V,,1,,N.C,,2,,V,,2,, contexts also change its backness feature to harmonize with the assimilated nasal coda? Secondly, will the duration of V,,1,,N be significantly reduced after nasal coda deletion in the V,,1,,N.(G)V context?

A production experiment and a perception experiment were designed to answer these two questions. Results show that the vowel backness feature of V,,1,, is not re-specified despite the appropriate environment, and the duration of V,,1,,N is not reduced after nasal deletion. The phonological consequences of these findings will be discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Yue|AUTHOR Yang Yue]]^^1^^, [[Fang Hu|AUTHOR Fang Hu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UCASS, China; ^^2^^CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 646–650&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper gives an acoustic phonetic description of the obstruents in the Hangzhou Wu Chinese dialect. Based on the data from 8 speakers (4 male and 4 female), obstruents were examined in terms of VOT, silent closure duration, segment duration, and spectral properties such as H1-H2, H1-F1 and H1-F3. Results suggest that VOT cannot differentiate the voiced obstruents from their voiceless counterparts, but the silent closure duration can. There is no voiced aspiration. And breathiness was detected on the vowel following the voiced category of obstruents. An acoustic consequence is that there is no segment for the voiced glottal fricative [ɦ], since it was realized as the breathiness on the following vowel. But interestingly, it is observed that syllables with [ɦ] are longer than their onset-less counterparts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lei Wang|AUTHOR Lei Wang]]
</p><p class="cpabstractcardaffiliationlist">ECUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 651–655&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this present study, we re-analyze the vowel system in Kaifeng Mandarin, adopting a phoneme-based approach. Our analysis deviates from the previous syllable-based analyses in a number of ways. First, we treat apical vowels [ɿ ʅ] as syllabic approximants and analyze them as allophones of the retroflex approximant /ɻ/. Second, the vowel inventory is of three sets, monophthongs, diphthongs and retroflex vowels. The classification of monophthongs and diphthongs is based on the phonological distribution of the coda nasal. That is, monophthongs can be followed by a nasal coda, while diphthongs cannot. This argument has introduced two new opening diphthongs /eε ɤʌ/ in the inventory, which have traditionally been described as monophthongs. Our phonological characterization of the vowels in Kaifeng Mandarin is further backed up by acoustic data. It is argued that the present study has gone some way towards enhancing our understanding of Mandarin segmental phonology in general.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Margaret Zellers|AUTHOR Margaret Zellers]]^^1^^, [[Barbara Schuppler|AUTHOR Barbara Schuppler]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Christian-Albrechts-Universität zu Kiel, Germany; ^^2^^Technische Universität Graz, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 656–660&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Fundamental frequency (F0) contours may show slight, microprosodic variations in the vicinity of plosive segments, which may have distinctive patterns relative to the place of articulation and voicing. Similarly, plosive bursts have distinctive characteristics associated with these articulatory features. The current study investigates the degree to which such microprosodic variations arise in two varieties of German, and how the two varieties differ. We find that microprosodic effects indeed arise in F0 as well as burst intensity and Center of Gravity, but that the extent of the variability is different in the two varieties under investigation, with northern German tending towards more variability in the microprosody of plosives than Austrian German. Coarticulatory effects on the burst with the following segment also arise, but also have different features in the two varieties. This evidence is consistent with the possibility that the fortis-lenis contrast is not equally stable in Austrian German and northern German.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing Huang|AUTHOR Jing Huang]], [[Feng-fan Hsieh|AUTHOR Feng-fan Hsieh]], [[Yueh-chin Chang|AUTHOR Yueh-chin Chang]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 661–665&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is an articulatory study of the //er//-suffixation (a.k.a. //erhua//) in Southwestern Mandarin (SWM), using co-registered EMA and ultrasound. Data from two female speakers in their twenties were analyzed and discussed. Our recording materials contain unsuffixed stems, //er//-suffixed forms and the rhotic schwa [ɚ], a phonemic vowel in its own right. Results suggest that the //er//-suffixation in SWM involves suffixing a rhotic schwa [ɚ] to the stem, unlike its counterpart in Beijing and Northeastern Mandarin [5]. Specifically, an entire rime will be replaced with the //er//-suffix if the nucleus vowel is non-high; only high vocoids will be preserved after the //er//-suffixation. The “rhoticity” is primarily realized as a //bunched// tongue shape configuration (i.e. a domed tongue body), while the Tongue Tip gesture plays a more limited role in SWM. A phonological analysis is accordingly proposed for the //er//-suffixation in SWM.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yinghao Li|AUTHOR Yinghao Li]], [[Jinghua Zhang|AUTHOR Jinghua Zhang]]
</p><p class="cpabstractcardaffiliationlist">Yanbian University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 666–670&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper examined the phonatory features induced by the tripartite plosives in Yanbian Korean, broadly considered as Hamkyungbukdo Korean dialect. Electroglottographic (EGG) and acoustic analysis was applied for five elderly Korean speakers. The results show that fortis-induced phonation is characterized with more constricted glottis, slower spectral tilt, and higher sub-harmonic-harmonic ratio. Lenis-induced phonation is shown to be breathier with smaller Contact Quotient and faster spectral tilt. Most articulatory and acoustic measures for the aspirated are shown to be patterned with the lenis; However, sporadic difference between the two indicates that the lenis induces more breathier phonation. The diplophonia phonation is argued to be a salient feature for the fortis-head syllables in Yanbian Korean. The vocal fold medial compression and adductive tension mechanisms are tentatively argued to be responsible for the production of the fortis. At last, gender difference is shown to be salient in the fortis-induced phonation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nicholas Wilkins|AUTHOR Nicholas Wilkins]]^^1^^, [[Max Cordes Galbraith|AUTHOR Max Cordes Galbraith]]^^2^^, [[Ifeoma Nwogu|AUTHOR Ifeoma Nwogu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Rochester Institute of Technology, USA; ^^2^^Sign-Speak, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 671–675&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we consider the problem of computationally representing American Sign Language (ASL) phonetics. We specifically present a computational model inspired by the sequential phonological ASL representation, known as the Movement-Hold (MH) Model. Our computational model is capable of not only capturing ASL phonetics, but also has generative abilities. We present a Probabilistic Graphical Model (PGM) which explicitly models holds and implicitly models movement in the MH model. For evaluation, we introduce a novel data corpus, ASLing, and compare our PGM to other models (GMM, LDA, and VAE) and show its superior performance. Finally, we demonstrate our model’s interpretability by computing various phonetic properties of ASL through the inspection of our learned model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hang Li|AUTHOR Hang Li]], [[Siyuan Chen|AUTHOR Siyuan Chen]], [[Julien Epps|AUTHOR Julien Epps]]
</p><p class="cpabstractcardaffiliationlist">UNSW Sydney, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 676–680&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a variety of conversation contexts, accurately predicting the time point at which a conversational participant is about to speak can help improve computer-mediated human-human communications. Although it is not difficult for a human to perceive turn-taking intent in conversations, it has been a challenging task for computers to date. In this study, we employed eye activity acquired from low-cost wearable hardware during natural conversation and studied how pupil diameter, blink and gaze direction could assist speech in voice activity and turn-taking prediction. Experiments on a new 2-hour corpus of natural conversational speech between six pairs of speakers wearing near-field eye video glasses revealed that the F1 score for predicting the voicing activity up to 1s ahead of the current instant can be above 80%, for speech and non-speech detection with fused eye and speech features. Further, extracting features synchronously from both interlocutors provides a relative reduction in error rate of 8.5% compared with a system based on just a single speaker. The performance of four turn-taking states based on the predicted voice activity also achieved F1 scores significantly higher than chance level. These findings suggest that wearable eye activity can play a role in future speech communication systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Ye Bai|AUTHOR Ye Bai]], [[Cunhang Fan|AUTHOR Cunhang Fan]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 721–725&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many approaches have been proposed to predict punctuation marks. Previous results demonstrate that these methods are effective. However, there still exists class imbalance problem during training. Most of the classes in the training set for punctuation prediction are non-punctuation marks. This will affect the performance of punctuation prediction tasks. Therefore, this paper uses a focal loss to alleviate this issue. The focal loss can down-weight easy examples and focus training on a sparse set of hard examples. Experiments are conducted on IWSLT2011 datasets. The results show that the punctuation predicting models trained with a focal loss obtain performance improvement over that trained with a cross entropy loss by up to 2.7% absolute overall F,,1,,-score on test set. The proposed model also outperforms previous state-of-the-art models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weiyi Lu|AUTHOR Weiyi Lu]], [[Yi Xu|AUTHOR Yi Xu]], [[Peng Yang|AUTHOR Peng Yang]], [[Belinda Zeng|AUTHOR Belinda Zeng]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 681–685&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice assistants such as Siri, Alexa, etc. usually adopt a pipeline to process users’ utterances, which generally include transcribing the audio into text, understanding the text, and finally responding back to users. One potential issue is that some utterances could be devoid of any interesting speech, and are thus not worth being processed through the entire pipeline. Examples of uninteresting utterances include those that have too much noise, are devoid of intelligible speech, etc. It is therefore desirable to have a model to filter out such useless utterances before they are ingested for downstream processing, thus saving system resources. Towards this end, we propose the Combination of Audio and Metadata (CAM) detector to identify utterances that contain only uninteresting speech. Our experimental results show that the CAM detector considerably outperforms using either an audio model or a metadata model alone, which demonstrates the effectiveness of the proposed system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Diamantino Caseiro|AUTHOR Diamantino Caseiro]], [[Pat Rondon|AUTHOR Pat Rondon]], [[Quoc-Nam Le The|AUTHOR Quoc-Nam Le The]], [[Petar Aleksic|AUTHOR Petar Aleksic]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 686–690&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end (E2E) mixed-case automatic speech recognition (ASR) systems that directly predict words in the written domain are attractive due to being simple to build, not requiring explicit capitalization models, allowing streaming capitalization without additional effort beyond that required for streaming ASR, and their small size. However, the fact that these systems produce various versions of the same word with different capitalizations, and even different word segmentations for different case variants when wordpieces (WP) are predicted, leads to multiple problems with contextual ASR. In particular, the size of and time to build contextual models grows considerably with the number of variants per word. In this paper, we propose separating orthographic recognition from capitalization, so that the ASR system first predicts a word, then predicts its capitalization in the form of a capitalization mask. We show that the use of capitalization masks achieves the same low error rate as traditional mixed-case ASR, while reducing the size and compilation time of contextual models. Furthermore, we observe significant improvements in capitalization quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huanru Henry Mao|AUTHOR Huanru Henry Mao]], [[Shuyang Li|AUTHOR Shuyang Li]], [[Julian McAuley|AUTHOR Julian McAuley]], [[Garrison W. Cottrell|AUTHOR Garrison W. Cottrell]]
</p><p class="cpabstractcardaffiliationlist">University of California at San Diego, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 691–695&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition (ASR) and speaker diarization (SD) models have traditionally been trained separately to produce rich conversation transcripts with speaker labels. Recent advances [1] have shown that joint ASR and SD models can learn to leverage audio-lexical inter-dependencies to improve word diarization performance. We introduce a new benchmark of hour-long podcasts collected from the weekly //This American Life// radio program to better compare these approaches when applied to extended multi-speaker conversations. We find that training separate ASR and SD models perform better when utterance boundaries are known but otherwise joint models can perform better. To handle long conversations with unknown utterance boundaries, we introduce a striding attention decoding algorithm and data augmentation techniques which, combined with model pre-training, improves ASR and SD.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mengzhe Geng|AUTHOR Mengzhe Geng]], [[Xurong Xie|AUTHOR Xurong Xie]], [[Shansong Liu|AUTHOR Shansong Liu]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 696–700&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Disordered speech recognition is a highly challenging task. The underlying neuro-motor conditions of people with speech disorders, often compounded with co-occurring physical disabilities, lead to the difficulty in collecting large quantities of speech required for system development. This paper investigates a set of data augmentation techniques for disordered speech recognition, including vocal tract length perturbation (VTLP), tempo perturbation and speed perturbation. Both normal and disordered speech were exploited in the augmentation process. Variability among impaired speakers in both the original and augmented data was modeled using learning hidden unit contributions (LHUC) based speaker adaptive training. The final speaker adapted system constructed using the UASpeech corpus and the best augmentation approach based on speed perturbation produced up to 2.92% absolute (9.3% relative) word error rate (WER) reduction over the baseline system without data augmentation, and gave an overall WER of 26.37% on the test set containing 16 dysarthric speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenqi Wei|AUTHOR Wenqi Wei]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Jiteng Ma|AUTHOR Jiteng Ma]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 701–705&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a real-time robot-based auxiliary system for risk evaluation of COVID-19 infection. It combines real-time speech recognition, temperature measurement, keyword detection, cough detection and other functions in order to convert live audio into actionable structured data to achieve the COVID-19 infection risk assessment function. In order to better evaluate the COVID-19 infection, we propose an end-to-end method for cough detection and classification for our proposed system. It is based on real conversation data from human-robot, which processes speech signals to detect cough and classifies it if detected. The structure of our model are maintained concise to be implemented for real-time applications. And we further embed this entire auxiliary diagnostic system in the robot and it is placed in the communities, hospitals and supermarkets to support COVID-19 testing. The system can be further leveraged within a business rules engine, thus serving as a foundation for real-time supervision and assistance applications. Our model utilizes a pretrained, robust training environment that allows for efficient creation and customization of customer-specific health states.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David S. Barbera|AUTHOR David S. Barbera]]^^1^^, [[Mark Huckvale|AUTHOR Mark Huckvale]]^^1^^, [[Victoria Fleming|AUTHOR Victoria Fleming]]^^1^^, [[Emily Upton|AUTHOR Emily Upton]]^^1^^, [[Henry Coley-Fisher|AUTHOR Henry Coley-Fisher]]^^1^^, [[Ian Shaw|AUTHOR Ian Shaw]]^^2^^, [[William Latham|AUTHOR William Latham]]^^3^^, [[Alexander P. Leff|AUTHOR Alexander P. Leff]]^^1^^, [[Jenny Crinion|AUTHOR Jenny Crinion]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College London, UK; ^^2^^SoftV, UK; ^^3^^Goldsmiths, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 706–710&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Anomia (word finding difficulties) is the hallmark of aphasia an acquired language disorder, most commonly caused by stroke. Assessment of speech performance using picture naming tasks is therefore a key method for identification of the disorder and monitoring patient’s response to treatment interventions. Currently, this assessment is conducted manually by speech and language therapists (SLT). Surprisingly, despite advancements in ASR and artificial intelligence with technologies like deep learning, research on developing automated systems for this task has been scarce. Here we present an utterance verification system incorporating a deep learning element that classifies ‘correct’/‘incorrect’ naming attempts from aphasic stroke patients. When tested on 8 native British-English speaking aphasics the system’s performance accuracy ranged between 83.6% to 93.6%, with a 10 fold cross validation mean of 89.5%. This performance was not only significantly better than one of the leading commercially available ASRs (Google speech-to-text service) but also comparable in some instances with two independent SLT ratings for the same dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shansong Liu|AUTHOR Shansong Liu]]^^1^^, [[Xurong Xie|AUTHOR Xurong Xie]]^^1^^, [[Jianwei Yu|AUTHOR Jianwei Yu]]^^1^^, [[Shoukang Hu|AUTHOR Shoukang Hu]]^^1^^, [[Mengzhe Geng|AUTHOR Mengzhe Geng]]^^1^^, [[Rongfeng Su|AUTHOR Rongfeng Su]]^^2^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^3^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^CAS, China; ^^3^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 711–715&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio-visual speech recognition (AVSR) technologies have been successfully applied to a wide range of tasks. When developing AVSR systems for disordered speech characterized by severe degradation of voice quality and large mismatch against normal, it is difficult to record large amounts of high quality audio-visual data. In order to address this issue, a cross-domain visual feature generation approach is proposed in this paper. Audio-visual inversion DNN system constructed using widely available out-of-domain audio-visual data was used to generate visual features for disordered speakers for whom video data is either very limited or unavailable. Experiments conducted on the UASpeech corpus suggest that the proposed cross-domain visual feature generation based AVSR system consistently outperformed the baseline ASR system and AVSR system using original visual features. An overall word error rate reduction of 3.6% absolute (14% relative) was obtained over the previously published best system on the 8 UASpeech dysarthric speakers with audio-visual data of the same task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Binghuai Lin|AUTHOR Binghuai Lin]], [[Liyuan Wang|AUTHOR Liyuan Wang]]
</p><p class="cpabstractcardaffiliationlist">Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 716–720&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language transcripts generated from Automatic speech recognition (ASR) often contain a large portion of disfluency and lack punctuation symbols. Punctuation restoration and disfluency removal of the transcripts can facilitate downstream tasks such as machine translation, information extraction and syntactic analysis [1]. Various studies have shown the influence between these two tasks and thus performed modeling based on a multi-task learning (MTL) framework [2, 3], which learns general representations in the shared layers and separate representations in the task-specific layers. However, task dependencies are normally ignored in the task-specific layers. To model the dependencies of tasks, we propose an attention-based structure in the task-specific layers of the MTL framework incorporating the pretrained BERT (a state-of-art NLP-related model) [4]. Experimental results based on English IWSLT dataset and the Switchboard dataset show the proposed architecture outperforms the separate modeling methods as well as the traditional MTL methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing-Xuan Zhang|AUTHOR Jing-Xuan Zhang]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 771–775&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an adversarial learning method for recognition-synthesis based non-parallel voice conversion. A recognizer is used to transform acoustic features into linguistic representations while a synthesizer recovers output features from the recognizer outputs together with the speaker identity. By separating the speaker characteristics from the linguistic representations, voice conversion can be achieved by replacing the speaker identity with the target one. In our proposed method, a speaker adversarial loss is adopted in order to obtain speaker-independent linguistic representations using the recognizer. Furthermore, discriminators are introduced and a generative adversarial network (GAN) loss is used to prevent the predicted features from being over-smoothed. For training model parameters, a strategy of pre-training on a multi-speaker dataset and then fine-tuning on the source-target speaker pair is designed. Our method achieved higher similarity than the baseline model that obtained the best performance in Voice Conversion Challenge 2018.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shaojin Ding|AUTHOR Shaojin Ding]], [[Guanlong Zhao|AUTHOR Guanlong Zhao]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]
</p><p class="cpabstractcardaffiliationlist">Texas A&M University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 776–780&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phonetic Posteriorgrams (PPGs) have received much attention for non-parallel many-to-many Voice Conversion (VC), and have been shown to achieve state-of-the-art performance. These methods implicitly assume that PPGs are speaker-independent and contain only linguistic information in an utterance. In practice, however, PPGs carry speaker individuality cues, such as accent, intonation, and speaking rate. As a result, these cues can leak into the voice conversion, making it sound similar to the source speaker. To address this issue, we propose an adversarial learning approach that can remove speaker-dependent information in VC models based on a PPG2speech synthesizer. During training, the encoder output of a PPG2speech synthesizer is fed to a classifier trained to identify the corresponding speaker, while the encoder is trained to fool the classifier. As a result, a more speaker-independent representation is learned. The proposed method is advantageous as it does not require pre-training the speaker classifier, and the adversarial speaker classifier is jointly trained with the PPG2speech synthesizer end-to-end. We conduct objective and subjective experiments on the CSTR VCTK Corpus under standard and one-shot VC conditions. Results show that the proposed method significantly improves the speaker identity of VC syntheses when compared with a baseline system trained without adversarial learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanping Li|AUTHOR Yanping Li]]^^1^^, [[Dongxiang Xu|AUTHOR Dongxiang Xu]]^^1^^, [[Yan Zhang|AUTHOR Yan Zhang]]^^2^^, [[Yang Wang|AUTHOR Yang Wang]]^^3^^, [[Binbin Chen|AUTHOR Binbin Chen]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NJUPT, China; ^^2^^JIT, China; ^^3^^vivo, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 781–785&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice Conversion (VC) aims at modifying source speaker’s speech to sound like that of target speaker while preserving linguistic information of given speech. StarGAN-VC was recently proposed, which utilizes a variant of Generative Adversarial Networks (GAN) to perform non-parallel many-to-many VC. However, the quality of generated speech is not satisfactory enough. An improved method named “PSR-StarGAN-VC” is proposed in this paper by incorporating three improvements. Firstly, perceptual loss functions are introduced to optimize the generator in StarGAN-VC aiming to learn high-level spectral features. Secondly, considering that Switchable Normalization (SN) could learn different operations in different normalization layers of model, it is introduced to replace Batch Normalization (BN) in StarGAN-VC. Lastly, Residual Network (ResNet) is applied to establish the mapping of different layers between the encoder and decoder of generator aiming to retain more semantic features when converting speech, and to reduce the difficulty of training. Experiment results on the VCC 2018 datasets demonstrate superiority of the proposed method in terms of naturalness and speaker similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adam Polyak|AUTHOR Adam Polyak]], [[Lior Wolf|AUTHOR Lior Wolf]], [[Yaniv Taigman|AUTHOR Yaniv Taigman]]
</p><p class="cpabstractcardaffiliationlist">Facebook, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 786–790&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a fully convolutional wav-to-wav network for converting between speakers’ voices, without relying on text. Our network is based on an encoder-decoder architecture, where the encoder is pre-trained for the task of Automatic Speech Recognition, and a multi-speaker waveform decoder is trained to reconstruct the original signal in an autoregressive manner. We train the network on narrated audiobooks, and demonstrate multi-voice TTS in those voices, by converting the voice of a TTS robot.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zining Zhang|AUTHOR Zining Zhang]]^^1^^, [[Bingsheng He|AUTHOR Bingsheng He]]^^2^^, [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^YITU Technology, Singapore; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 791–795&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Non-parallel many-to-many voice conversion is recently attracting huge research efforts in the speech processing community. A voice conversion system transforms an utterance of a source speaker to another utterance of a target speaker by keeping the content in the original utterance and replacing by the vocal features from the target speaker. Existing solutions, e.g., StarGAN-VC2, present promising results, //only// when speech corpus of the engaged speakers is available during model training. AUTOVC is able to perform voice conversion on unseen speakers, but it needs an external pretrained speaker verification model. In this paper, we present our new GAN-based zero-shot voice conversion solution, called GAZEV, which targets to support unseen speakers on both source and target utterances. Our key technical contribution is the adoption of speaker embedding loss on top of the GAN framework, as well as adaptive instance normalization strategy, in order to address the limitations of speaker identity transfer in existing solutions. Our empirical evaluations demonstrate significant performance improvement on output speech quality, and comparable speaker similarity to AUTOVC.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tao Wang|AUTHOR Tao Wang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Rongxiu Zhong|AUTHOR Rongxiu Zhong]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 796–800&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The low similarity and naturalness of synthesized speech remain a challenging problem for speaker adaptation with few resources. Since the acoustic model is too complex to interpret, overfitting will occur when training with few data. To prevent the model from overfitting, this paper proposes a novel speaker adaptation framework that decomposes the parameter space of the end-to-end acoustic model into two parts, with the one on predicting spoken content and the other on modeling speaker’s voice. The spoken content is represented by phone posteriorgram (PPG) which is speaker independent. By adapting the two sub-modules separately, the overfitting can be alleviated effectively. Moreover, we propose two different adaptation strategies based on whether the data has text annotation. In this way, speaker adaptation can also be performed without text annotations. Experimental results confirm the adaptability of our proposed method of factorizating spoken content and voice. Listening tests demonstrate that our proposed method can achieve better performance with just 10 sentences than speaker adaptation conducted on Tacotron in terms of naturalness and speaker similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adam Polyak|AUTHOR Adam Polyak]], [[Lior Wolf|AUTHOR Lior Wolf]], [[Yossi Adi|AUTHOR Yossi Adi]], [[Yaniv Taigman|AUTHOR Yaniv Taigman]]
</p><p class="cpabstractcardaffiliationlist">Facebook, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 801–805&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a wav-to-wav generative model for the task of singing voice conversion from any identity. Our method utilizes both an acoustic model, trained for the task of automatic speech recognition, together with melody extracted features to drive a waveform-based generator. The proposed generative architecture is invariant to the speaker’s identity and can be trained to generate target singers from unlabeled training data, using either speech or singing sources. The model is optimized in an end-to-end fashion without any manual supervision, such as lyrics, musical notes or parallel samples. The proposed approach is fully-convolutional and can generate audio in realtime. Experiments show that our method significantly outperforms the baseline methods while generating convincingly better audio samples than alternative attempts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tatsuma Ishihara|AUTHOR Tatsuma Ishihara]]^^1^^, [[Daisuke Saito|AUTHOR Daisuke Saito]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^GREE, Japan; ^^2^^University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 806–810&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a novel approach to embed speaker information to feature vectors at frame level using an attention mechanism, and its application to one-shot voice conversion. A one-shot voice conversion system is a type of voice conversion system where only one utterance from a target speaker is available for conversion. In many one-shot voice conversion systems, a speaker encoder mechanism compresses an utterance of the target speaker into a fixed-size vector for propagating speaker information. However, the obtained representation has lost temporal information related to speaker identities and it could degrade conversion quality. To alleviate this problem, we propose a novel way to embed speaker information using an attention mechanism. Instead of compressing into a fixed-size vector, our proposed speaker encoder outputs a sequence of speaker embedding vectors. The obtained sequence is selectively combined with input frames of a source speaker by an attention mechanism. Finally the obtained time varying speaker information is utilized for a decoder to generate the converted features. Objective evaluation showed that our method reduced the averaged mel-cepstrum distortion to 5.23 dB from 5.34 dB compared with the baseline system. The subjective preference test showed that our proposed system outperformed the baseline one.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Cong|AUTHOR Jian Cong]]^^1^^, [[Shan Yang|AUTHOR Shan Yang]]^^1^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^, [[Guoqiao Yu|AUTHOR Guoqiao Yu]]^^2^^, [[Guanglu Wan|AUTHOR Guanglu Wan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Meituan-Dianping Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 811–815&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Data efficient voice cloning aims at synthesizing target speaker’s voice with only a few enrollment samples at hand. To this end, speaker adaptation and speaker encoding are two typical methods based on base model trained from multiple speakers. The former uses a small set of target speaker data to transfer the multi-speaker model to target speaker’s voice through direct model update, while in the latter, only a few seconds of target speaker’s audio directly goes through an extra speaker encoding model along with the multi-speaker model to synthesize target speaker’s voice without model update. Nevertheless, the two methods need clean target speaker data. However, the samples provided by user may inevitably contain acoustic noise in real applications. It’s still challenging to generating target voice with noisy data. In this paper, we study the data efficient voice cloning problem from noisy samples under the sequence-to-sequence based TTS paradigm. Specifically, we introduce domain adversarial training (DAT) to speaker adaptation and speaker encoding, which aims to disentangle noise from speech-noise mixture. Experiments show that for both speaker adaptation and encoding, the proposed approaches can consistently synthesize clean speech from noisy speaker samples, apparently outperforming the method adopting state-of-the-art speech enhancement module.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sixin Hong|AUTHOR Sixin Hong]]^^1^^, [[Yuexian Zou|AUTHOR Yuexian Zou]]^^1^^, [[Wenwu Wang|AUTHOR Wenwu Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^University of Surrey, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 816–820&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multiple instance learning (MIL) has recently been used for weakly labelled audio tagging, where the spectrogram of an audio signal is divided into segments to form instances in a bag, and then the low-dimensional features of these segments are pooled for tagging. The choice of a pooling scheme is the key to exploiting the weakly labelled data. However, the traditional pooling schemes are usually fixed and unable to distinguish the contributions, making it difficult to adapt to the characteristics of the sound events. In this paper, a novel pooling algorithm is proposed for MIL, named gated multi-head attention pooling (GMAP), which is able to attend to the information of events from different heads at different positions. Each head allows the model to learn information from different representation subspaces. Furthermore, in order to avoid the redundancy of multi-head information, a gating mechanism is used to fuse individual head features. The proposed GMAP increases the modeling power of the single-head attention with no computational overhead. Experiments are carried out on Audioset, which is a large-scale weakly labelled dataset, and show superior results to the non-adaptive pooling and the vanilla attention pooling schemes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amit Jindal|AUTHOR Amit Jindal]]^^1^^, [[Narayanan Elavathur Ranganatha|AUTHOR Narayanan Elavathur Ranganatha]]^^1^^, [[Aniket Didolkar|AUTHOR Aniket Didolkar]]^^1^^, [[Arijit Ghosh Chowdhury|AUTHOR Arijit Ghosh Chowdhury]]^^1^^, [[Di Jin|AUTHOR Di Jin]]^^2^^, [[Ramit Sawhney|AUTHOR Ramit Sawhney]]^^3^^, [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MAHE, India; ^^2^^MIT, USA; ^^3^^NSUT, India; ^^4^^IIIT Delhi, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 861–865&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents SpeechMix, a regularization and data augmentation technique for deep sound recognition. Our strategy is to create virtual training samples by interpolating speech samples in hidden space. SpeechMix has the potential to generate an infinite number of new augmented speech samples since the combination of speech samples is continuous. Thus, it allows downstream models to avoid overfitting drastically. Unlike other mixing strategies that only work on the input space, we apply our method on the intermediate layers to capture a broader representation of the feature space. Through an extensive quantitative evaluation, we demonstrate the effectiveness of SpeechMix in comparison to standard learning regimes and previously applied mixing strategies. Furthermore, we highlight how different hidden layers contribute to the improvements in classification using an ablation study.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Helin Wang|AUTHOR Helin Wang]]^^1^^, [[Yuexian Zou|AUTHOR Yuexian Zou]]^^1^^, [[Dading Chong|AUTHOR Dading Chong]]^^1^^, [[Wenwu Wang|AUTHOR Wenwu Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^University of Surrey, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 821–825&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional neural networks (CNN) are one of the best-performing neural network architectures for environmental sound classification (ESC). Recently, temporal attention mechanisms have been used in CNN to capture the useful information from the relevant time frames for audio classification, especially for weakly labelled data where the onset and offset times of the sound events are not applied. In these methods, however, the inherent spectral characteristics and variations are not explicitly exploited when obtaining the deep features. In this paper, we propose a novel parallel temporal-spectral attention mechanism for CNN to learn discriminative sound representations, which enhances the temporal and spectral features by capturing the importance of different time frames and frequency bands. Parallel branches are constructed to allow temporal attention and spectral attention to be applied respectively in order to mitigate interference from the segments without the presence of sound events. The experiments on three environmental sound classification (ESC) datasets and two acoustic scene classification (ASC) datasets show that our method improves the classification performance and also exhibits robustness to noise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luyu Wang|AUTHOR Luyu Wang]], [[Kazuya Kawakami|AUTHOR Kazuya Kawakami]], [[Aaron van den Oord|AUTHOR Aaron van den Oord]]
</p><p class="cpabstractcardaffiliationlist">DeepMind, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 826–830&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the vast amount of audio data available, powerful sound representations can be learned with self-supervised methods even in the absence of explicit annotations. In this work we investigate learning general audio representations directly from raw signals using the Contrastive Predictive Coding objective. We further extend it by leveraging ideas from adversarial machine learning to produce additive perturbations that effectively makes the learning harder, such that the predictive tasks will not be distracted by trivial details. We also look at the effects of different design choices for the objective, including the nonlinear similarity measure and the way the negatives are drawn. Combining these contributions our models are able to considerably outperform previous spectrogram-based unsupervised methods. On AudioSet we observe a relative improvement of 14% in mean average precision over the state of the art with half the size of the training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Arjun Pankajakshan|AUTHOR Arjun Pankajakshan]], [[Helen L. Bear|AUTHOR Helen L. Bear]], [[Vinod Subramanian|AUTHOR Vinod Subramanian]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]
</p><p class="cpabstractcardaffiliationlist">Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 831–835&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we investigate the importance of the extent of memory in sequential self attention for sound recognition. We propose to use a memory controlled sequential self attention mechanism on top of a convolutional recurrent neural network (CRNN) model for polyphonic sound event detection (SED). Experiments on the URBAN-SED dataset demonstrate the impact of the extent of memory on sound recognition performance with the self attention induced SED model. We extend the proposed idea with a multi-head self attention mechanism where each attention head processes the audio embedding with explicit attention width values. The proposed use of memory controlled sequential self attention offers a way to induce relations among frames of sound event tokens. We show that our memory controlled self attention model achieves an event based F-score of 33.92% on the URBAN-SED dataset, outperforming the F-score of 20.10% reported by the model without self attention.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Donghyeon Kim|AUTHOR Donghyeon Kim]]^^1^^, [[Jaihyun Park|AUTHOR Jaihyun Park]]^^1^^, [[David K. Han|AUTHOR David K. Han]]^^2^^, [[Hanseok Ko|AUTHOR Hanseok Ko]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Korea University, Korea; ^^2^^Army Research Lab, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 836–840&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio based event recognition becomes quite challenging in real world noisy environments. To alleviate the noise issue, time-frequency mask based feature enhancement methods have been proposed. While these methods with fixed filter settings have been shown to be effective in familiar noise backgrounds, they become brittle when exposed to unexpected noise. To address the unknown noise problem, we develop an approach based on dynamic filter generation learning. In particular, we propose a dual stage dynamic filter generator networks that can be trained to generate a time-frequency mask specifically created for each input audio. Two alternative approaches of training the mask generator network are developed for feature enhancements in high noise environments. Our proposed method shows improved performance and robustness in both clean and unseen noise environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xu Zheng|AUTHOR Xu Zheng]]^^1^^, [[Yan Song|AUTHOR Yan Song]]^^1^^, [[Jie Yan|AUTHOR Jie Yan]]^^1^^, [[Li-Rong Dai|AUTHOR Li-Rong Dai]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^1^^, [[Lin Liu|AUTHOR Lin Liu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^iFLYTEK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 841–845&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Mean teacher based methods are increasingly achieving state-of-the-art performance for large-scale weakly labeled and unlabeled sound event detection (SED) tasks in recent DCASE challenges. By penalizing inconsistent predictions under different perturbations, mean teacher methods can exploit large-scale unlabeled data in a self-ensembling manner. In this paper, an effective perturbation based semi-supervised learning (SSL) method is proposed based on the mean teacher method. Specifically, a new independent component (IC) module is proposed to introduce perturbations for different convolutional layers, designed as a combination of batch normalization and dropblock operations. The proposed IC module can reduce correlation between neurons to improve performance. A global statistics pooling based attention module is further proposed to explicitly model inter-dependencies between the time-frequency domain and channels, using statistics information (e.g. mean, standard deviation, max) along different dimensions. This can provide an effective attention mechanism to adaptively re-calibrate the output feature map. Experimental results on Task 4 of the DCASE2018 challenge demonstrate the superiority of the proposed method, achieving about 39.8% F1-score, outperforming the previous winning system’s 32.4% by a significant margin.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]]^^1^^, [[Bowen Shi|AUTHOR Bowen Shi]]^^2^^, [[Ming Sun|AUTHOR Ming Sun]]^^1^^, [[Chao Wang|AUTHOR Chao Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^TTIC, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 846–850&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a network architecture mainly designed for audio tagging, which can also be used for weakly supervised acoustic event detection (AED). The proposed network consists of a modified DenseNet as the feature extractor, and a global average pooling (GAP) layer to predict frame-level labels at inference time. This architecture is inspired by the work proposed by Zhou et al., a well-known framework using GAP to localize visual objects given image-level labels. While most of the previous works on weakly supervised AED used recurrent layers with attention-based mechanism to localize acoustic events, the proposed network directly localizes events using the feature map extracted by DenseNet without any recurrent layers. In the audio tagging task of DCASE 2017, our method significantly outperforms the state-of-the-art method in F1 score by 5.3% on the dev set, and 6.0% on the eval set in terms of absolute values. For weakly supervised AED task in DCASE 2018, our model outperforms the state-of-the-art method in event-based F1 by 8.1% on the dev set, and 0.5% on the eval set in terms of absolute values, by using data augmentation and tri-training to leverage unlabeled data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chun-Chieh Chang|AUTHOR Chun-Chieh Chang]]^^1^^, [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]]^^2^^, [[Ming Sun|AUTHOR Ming Sun]]^^2^^, [[Chao Wang|AUTHOR Chao Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 851–855&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Knowledge Distillation (KD) is a popular area of research for reducing the size of large models while still maintaining good performance. The outputs of larger teacher models are used to guide the training of smaller student models. Given the repetitive nature of acoustic events, we propose to leverage this information to regulate the KD training for Audio Tagging. This novel KD method, Intra-Utterance Similarity Preserving KD (IUSP), shows promising results for the audio tagging task. It is motivated by the previously published KD method: Similarity Preserving KD (SP). However, instead of preserving the pairwise similarities between inputs within a mini-batch, our method preserves the pairwise similarities between the frames of a single input utterance. Our proposed KD method, IUSP, shows consistent improvements over SP across student models of different sizes on the DCASE 2019 Task 5 dataset for audio tagging. There is a 27.1% to 122.4% percent increase in improvement of micro AUPRC over the baseline relative to SPs improvement of over the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Inyoung Park|AUTHOR Inyoung Park]], [[Hong Kook Kim|AUTHOR Hong Kook Kim]]
</p><p class="cpabstractcardaffiliationlist">GIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 856–860&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a two-stage sound event detection (SED) model to deal with sound events overlapping in time-frequency. In the first stage which consists of a faster R-CNN and an attention-LSTM, each log-mel spectrogram segment is divided into one or more proposed regions (PRs) according to the coordinates of a region proposal network. To efficiently train polyphonic sound, we take only one PR for each sound event from a bounding box regressor associated with the attention-LSTM. In the second stage, the original input image and the difference image between adjacent segments are separately pooled according to the coordinate of each PR predicted in the first stage. Then, two feature maps using CNNs are concatenated and processed further by LSTM. Finally, CTC-based n-best SED is conducted using the softmax output from the CNN-LSTM, where CTC has two tokens for each event so that the start and ending time frames are accurately detected. Experiments on SED using DCASE 2019 Task 3 show that the proposed two-stage model with multi-token CTC achieves an F1-score of 97.5%, while the first stage alone and the two-stage model with a conventional CTC yield F1-scores of 91.9% and 95.6%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martin Radfar|AUTHOR Martin Radfar]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]], [[Siegfried Kunzmann|AUTHOR Siegfried Kunzmann]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 866–870&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding (SLU) refers to the process of inferring the semantic information from audio signals. While the neural transformers consistently deliver the best performance among the state-of-the-art neural architectures in field of natural language processing (NLP), their merits in a closely related field, i.e., spoken language understanding (SLU) have not been investigated. In this paper, we introduce an end-to-end neural transformer-based SLU model that can predict the variable-length domain, intent, and slots vectors embedded in an audio signal with no intermediate token prediction architecture. This new architecture leverages the self-attention mechanism by which the audio signal is transformed to various sub-subspaces allowing to extract the semantic context implied by an utterance. Our end-to-end transformer SLU predicts the domains, intents and slots in the Fluent Speech Commands dataset with accuracy equal to 98.1%, 99.6%, and 99.6%, respectively and outperforms the SLU models that leverage a combination of recurrent and convolutional neural networks by 1.4% while the size of our model is 25% smaller than that of these architectures. Additionally, due to independent sub-space projections in the self-attention layer, the model is highly parallelizable which makes it a good candidate for on-device SLU.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karthik Gopalakrishnan|AUTHOR Karthik Gopalakrishnan]], [[Behnam Hedayatnia|AUTHOR Behnam Hedayatnia]], [[Longshaokan Wang|AUTHOR Longshaokan Wang]], [[Yang Liu|AUTHOR Yang Liu]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 911–915&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Large end-to-end neural open-domain chatbots are becoming increasingly popular. However, research on building such chatbots has typically assumed that the user input is written in nature and it is not clear whether these chatbots would seamlessly integrate with automatic speech recognition (ASR) models to serve the speech modality. We aim to bring attention to this important question by empirically studying the effects of various types of synthetic and actual ASR hypotheses in the dialog history on TransferTransfo, a state-of-the-art Generative Pre-trained Transformer (GPT) based neural open-domain dialog system from the NeurIPS ConvAI2 challenge. We observe that TransferTransfo trained on written data is very sensitive to such hypotheses introduced to the dialog history during inference time. As a baseline mitigation strategy, we introduce synthetic ASR hypotheses to the dialog history during training and observe marginal improvements, demonstrating the need for further research into techniques to make end-to-end open-domain chatbots fully speech-robust. To the best of our knowledge, this is the first study to evaluate the effects of synthetic and actual ASR hypotheses on a state-of-the-art neural open-domain dialog system and we hope it promotes speech-robustness as an evaluation criterion in open-domain dialog.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chen Liu|AUTHOR Chen Liu]], [[Su Zhu|AUTHOR Su Zhu]], [[Zijian Zhao|AUTHOR Zijian Zhao]], [[Ruisheng Cao|AUTHOR Ruisheng Cao]], [[Lu Chen|AUTHOR Lu Chen]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 871–875&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken Language Understanding (SLU) converts hypotheses from automatic speech recognizer (ASR) into structured semantic representations. ASR recognition errors can severely degenerate the performance of the subsequent SLU module. To address this issue, word confusion networks (WCNs) have been used as the input for SLU, which contain richer information than 1-best or n-best hypotheses list. To further eliminate ambiguity, the last system act of dialogue context is also utilized as additional input. In this paper, a novel BERT based SLU model (WCN-BERT SLU) is proposed to encode WCNs and the dialogue context jointly. It can integrate both structural information and ASR posterior probabilities of WCNs in the BERT architecture. Experiments on DSTC2, a benchmark of SLU, show that the proposed method is effective and can outperform previous state-of-the-art models significantly.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Milind Rao|AUTHOR Milind Rao]], [[Anirudh Raju|AUTHOR Anirudh Raju]], [[Pranav Dheram|AUTHOR Pranav Dheram]], [[Bach Bui|AUTHOR Bach Bui]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 876–880&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the problem of spoken language understanding (SLU) of extracting natural language intents and associated slot arguments or named entities from speech that is primarily directed at voice assistants. Such a system subsumes both automatic speech recognition (ASR) as well as natural language understanding (NLU). An end-to-end joint SLU model can be built to a required specification opening up the opportunity to deploy on hardware constrained scenarios like devices enabling voice assistants to work offline, in a privacy preserving manner, whilst also reducing server costs.

We first present models that extract utterance intent directly from speech without intermediate text output. We then present a compositional model, which generates the transcript using the Listen Attend Spell ASR system and then extracts the interpretation using a neural NLU model. Finally, we contrast these methods with a jointly trained end-to-end joint SLU model, consisting of ASR and NLU subsystems that are connected by a neural network based interface instead of text and that produces transcripts as well as NLU interpretations. We show that the jointly trained model improves ASR by incorporating semantic information from NLU, and also improves NLU by exposing it to the ASR confusion encoded in the hidden layer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavel Denisov|AUTHOR Pavel Denisov]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 881–885&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding is typically based on pipeline architectures including speech recognition and natural language understanding steps. These components are optimized independently to allow usage of available data, but the overall system suffers from error propagation. In this paper, we propose a novel training method that enables pretrained contextual embeddings to process acoustic features. In particular, we extend it with an encoder of pretrained speech recognition systems in order to construct end-to-end spoken language understanding systems. Our proposed method is based on the teacher-student framework across speech and text modalities that aligns the acoustic and the semantic latent spaces. Experimental results in three benchmarks show that our system reaches the performance comparable to the pipeline architecture without using any training data and outperforms it after fine-tuning with ten examples per class on two out of three benchmarks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Srikanth Raj Chetupalli|AUTHOR Srikanth Raj Chetupalli]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 886–890&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conversational speech, while being unstructured at an utterance level, typically has a macro topic which provides larger context spanning multiple utterances. The current language models in speech recognition systems using recurrent neural networks (RNNLM) rely mainly on the local context and exclude the larger context. In order to model the long term dependencies of words across multiple sentences, we propose a novel architecture where the words from prior utterances are converted to an embedding. The relevance of these embeddings for the prediction of next word in the current sentence is found using a gating network. The relevance weighted context embedding vector is combined in the language model to improve the next word prediction, and the entire model including the context embedding and the relevance weighting layers is jointly learned for a conversational language modeling task. Experiments are performed on two conversational datasets — AMI corpus and the Switchboard corpus. In these tasks, we illustrate that the proposed approach yields significant improvements in language model perplexity over the RNNLM baseline. In addition, the use of proposed conversational LM for ASR rescoring results in absolute WER reduction of 1.2% on Switchboard dataset and 1.0% on AMI dataset over the RNNLM based ASR baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yusheng Tian|AUTHOR Yusheng Tian]], [[Philip John Gorinski|AUTHOR Philip John Gorinski]]
</p><p class="cpabstractcardaffiliationlist">Huawei Technologies, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 891–895&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end spoken language understanding (SLU) systems have many advantages over conventional pipeline systems, but collecting in-domain speech data to train an end-to-end system is costly and time consuming. One question arises from this: how to train an end-to-end SLU with limited amounts of data? Many researchers have explored approaches that make use of other related data resources, typically by pre-training parts of the model on high-resource speech recognition. In this paper, we suggest improving the generalization performance of SLU models with a non-standard learning algorithm, Reptile. Though Reptile was originally proposed for model-agnostic meta learning, we argue that it can also be used to directly learn a target task and result in better generalization than conventional gradient descent. In this work, we employ Reptile to the task of end-to-end spoken intent classification. Experiments on four datasets of different languages and domains show improvement of intent prediction accuracy, both when Reptile is used alone and used in addition to pre-training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Won Ik Cho|AUTHOR Won Ik Cho]]^^1^^, [[Donghyun Kwak|AUTHOR Donghyun Kwak]]^^2^^, [[Ji Won Yoon|AUTHOR Ji Won Yoon]]^^1^^, [[Nam Soo Kim|AUTHOR Nam Soo Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Seoul National University, Korea; ^^2^^Search Solutions, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 896–900&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech is one of the most effective means of communication and is full of information that helps the transmission of utterer’s thoughts. However, mainly due to the cumbersome processing of acoustic features, phoneme or word posterior probability has frequently been discarded in understanding the natural language. Thus, some recent spoken language understanding (SLU) modules have utilized end-to-end structures that preserve the uncertainty information. This further reduces the propagation of speech recognition error and guarantees computational efficiency. We claim that in this process, the speech comprehension can benefit from the inference of massive pre-trained language models (LMs). We transfer the knowledge from a concrete Transformer-based text LM to an SLU module which can face a data shortage, based on recent cross-modal distillation methodologies. We demonstrate the validity of our proposal upon the performance on Fluent Speech Command, an English SLU benchmark. Thereby, we experimentally verify our hypothesis that the knowledge could be shared from the top layer of the LM to a fully speech-based module, in which the abstracted speech is expected to meet the semantic representation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weitong Ruan|AUTHOR Weitong Ruan]], [[Yaroslav Nechaev|AUTHOR Yaroslav Nechaev]], [[Luoxin Chen|AUTHOR Luoxin Chen]], [[Chengwei Su|AUTHOR Chengwei Su]], [[Imre Kiss|AUTHOR Imre Kiss]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 901–905&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A modern Spoken Language Understanding (SLU) system usually contains two sub-systems, Automatic Speech Recognition (ASR) and Natural Language Understanding (NLU), where ASR transforms voice signal to text form and NLU provides intent classification and slot filling from the text. In practice, such decoupled ASR/NLU design facilitates fast model iteration for both components. However, this makes downstream NLU susceptible to errors from the upstream ASR, causing significant performance degradation. Therefore, dealing with such errors is a major opportunity to improve overall SLU model performance. In this work, we first propose a general evaluation criterion that requires an ASR error robust model to perform well on both transcription and ASR hypothesis. Then robustness training techniques for both classification task and NER task are introduced. Experimental results on two datasets show that our proposed approaches improve model robustness to ASR errors for both tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hong-Kwang J. Kuo|AUTHOR Hong-Kwang J. Kuo]]^^1^^, [[Zoltán Tüske|AUTHOR Zoltán Tüske]]^^1^^, [[Samuel Thomas|AUTHOR Samuel Thomas]]^^1^^, [[Yinghui Huang|AUTHOR Yinghui Huang]]^^1^^, [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]^^1^^, [[Brian Kingsbury|AUTHOR Brian Kingsbury]]^^1^^, [[Gakuto Kurata|AUTHOR Gakuto Kurata]]^^2^^, [[Zvi Kons|AUTHOR Zvi Kons]]^^3^^, [[Ron Hoory|AUTHOR Ron Hoory]]^^3^^, [[Luis Lastras|AUTHOR Luis Lastras]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, USA; ^^2^^IBM, Japan; ^^3^^IBM, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 906–910&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An essential component of spoken language understanding (SLU) is slot filling: representing the meaning of a spoken utterance using semantic entity labels. In this paper, we develop end-to-end (E2E) spoken language understanding systems that directly convert speech input to semantic entities and investigate if these E2E SLU models can be trained solely on semantic entity annotations without word-for-word transcripts. Training such models is very useful as they can drastically reduce the cost of data collection. We created two types of such speech-to-entities models, a CTC model and an attention-based encoder-decoder model, by adapting models trained originally for speech recognition. Given that our experiments involve speech input, these systems need to recognize both the entity label and words representing the entity value correctly. For our speech-to-entities experiments on the ATIS corpus, both the CTC and attention models showed impressive ability to skip non-entity words: there was little degradation when trained on just entities versus full transcripts. We also explored the scenario where the entities are in an order not necessarily related to spoken order in the utterance. With its ability to do re-ordering, the attention model did remarkably well, achieving only about 2% degradation in speech-to-bag-of-entities F1 score.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jialu Li|AUTHOR Jialu Li]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1027–1031&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phones, the segmental units of the International Phonetic Alphabet (IPA), are used for lexical distinctions in most human languages; Tones, the suprasegmental units of the IPA, are used in perhaps 70%. Many previous studies have explored cross-lingual adaptation of automatic speech recognition (ASR) phone models, but few have explored the multilingual and cross-lingual transfer of synchronization between phones and tones. In this paper, we test four Connectionist Temporal Classification (CTC)-based acoustic models, differing in the degree of synchrony they impose between phones and tones. Models are trained and tested multilingually in three languages, then adapted and tested cross-lingually in a fourth. Both synchronous and asynchronous models are effective in both multilingual and cross-lingual settings. Synchronous models achieve lower error rate in the joint phone+tone tier, but asynchronous training results in lower tone error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jisung Wang|AUTHOR Jisung Wang]]^^1^^, [[Jihwan Kim|AUTHOR Jihwan Kim]]^^2^^, [[Sangki Kim|AUTHOR Sangki Kim]]^^2^^, [[Yeha Lee|AUTHOR Yeha Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^VUNO, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1072–1075&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) tasks are usually solved using lexicon-based hybrid systems or character-based acoustic models to automatically translate speech data into written text. While hybrid systems require a manually designed lexicon, end-to-end models can process character-based speech data. This resolves the need to define a lexicon for non-English languages for which a standard lexicon may be absent. Korean is relatively phonetic and has a unique writing system, and it is thus worth investigating useful modeling units for end-to-end Korean ASR. Our work is the first to compare the performance of deep neural networks (DNNs), designed as a combination of connectionist temporal classification and attention-based encoder-decoder, on various lexicon-free Korean models. Experiments on the Zeroth-Korean dataset and medical records, which consist of Korean-only and Korean-English code-switching corpora respectively, show how DNNs based on syllables and sub-words significantly outperform Jamo-based models on Korean ASR tasks. Our successful application of using lexicon-free modeling units on non-English ASR tasks provides compelling evidence that lexicon-free approaches can alleviate the heavy code-switching involved in non-English medical transcriptions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martha Yifiru Tachbelie|AUTHOR Martha Yifiru Tachbelie]], [[Solomon Teferra Abate|AUTHOR Solomon Teferra Abate]], [[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1032–1036&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the cross-lingual and multilingual experiments we have conducted using existing resources of other languages for the development of speech recognition system for less-resourced languages. In our experiments, we used the Globalphone corpus as source and considered four Ethiopian languages namely Amharic, Oromo, Tigrigna and Wolaytta as targets. We have developed multilingual (ML) Automatic Speech Recognition (ASR) systems and decoded speech of the four Ethiopian languages. A multilingual acoustic model (AM) trained with speech data of 22 Globalphone languages but the target languages, achieved a Word Error Rate (WER) of 15.79%. Moreover, including training speech of one closely related language (in terms of phonetic overlap) in ML AM training resulted in a relative WER reduction of 51.41%. Although adaptation of ML systems did not give significant WER reduction over the monolingual ones, it enables us to rapidly adapt existing ML ASR systems to new languages. In sum, our experiments demonstrated that ASR systems can be developed rapidly with a pronunciation dictionary (PD) of low out of vocabulary (OOV) rate and a strong language model (LM).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenxin Hou|AUTHOR Wenxin Hou]]^^1^^, [[Yue Dong|AUTHOR Yue Dong]]^^1^^, [[Bairong Zhuang|AUTHOR Bairong Zhuang]]^^1^^, [[Longfei Yang|AUTHOR Longfei Yang]]^^1^^, [[Jiatong Shi|AUTHOR Jiatong Shi]]^^2^^, [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tokyo Tech, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1037–1041&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we report a large-scale end-to-end language-independent multilingual model for joint automatic speech recognition (ASR) and language identification (LID). This model adopts hybrid CTC/attention architecture and achieves word error rate (WER) of 52.8 and LID accuracy of 93.5 on 42 languages with around 5000 hours of training data. We also compare the effects of using subword-level or character-level vocabulary for large-scale multilingual tasks. Furthermore, we transfer the pre-trained model to 14 low-resource languages. Results show that the pre-trained model achieves significantly better results than non-pretrained baselines on both language-specific and multilingual low-resource ASR tasks in terms of WER, which is reduced by 28.1% and 11.4% respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinyuan Zhou|AUTHOR Xinyuan Zhou]]^^1^^, [[Emre Yılmaz|AUTHOR Emre Yılmaz]]^^2^^, [[Yanhua Long|AUTHOR Yanhua Long]]^^1^^, [[Yijie Li|AUTHOR Yijie Li]]^^3^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SHNU, China; ^^2^^NUS, Singapore; ^^3^^Unisound, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1042–1046&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching (CS) occurs when a speaker alternates words of two or more languages within a single sentence or across sentences. Automatic speech recognition (ASR) of CS speech has to deal with two or more languages at the same time. In this study, we propose a Transformer-based architecture with two symmetric language-specific encoders to capture the individual language attributes, that improve the acoustic representation of each language. These representations are combined using a language-specific multi-head attention mechanism in the decoder module. Each encoder and its corresponding attention module in the decoder are pre-trained using a large monolingual corpus aiming to alleviate the impact of limited CS training data. We call such a network a multi-encoder-decoder (MED) architecture. Experiments on the SEAME corpus show that the proposed MED architecture achieves 10.2% and 10.8% relative error rate reduction on the CS evaluation sets with Mandarin and English as the matrix language respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Solomon Teferra Abate|AUTHOR Solomon Teferra Abate]], [[Martha Yifiru Tachbelie|AUTHOR Martha Yifiru Tachbelie]], [[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1047–1051&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Development of Multilingual Automatic Speech Recognition (ASR) systems enables to share existing speech and text corpora among languages. We have conducted experiments on the development of multilingual Acoustic Models (AM) and Language Models (LM) for Tigrigna. Using Amharic Deep Neural Network (DNN) AM, Tigrigna pronunciation dictionary and trigram LM, we achieved a Word Error Rate (WER) of 30.9% for Tigrigna. Adding training speech from the target language (Tigrigna) to the whole training speech of the donor language (Amharic) continuously reduces WER with the amount of added data. We have also developed different (including recurrent neural networks based) multilingual LMs and achieved a relative WER reduction of 3.56% compared to the use of monolingual trigram LMs. Considering scarcity of computational resources to decode with very large vocabularies, we have also experimented on the use of morphemes as pronunciation and language modeling units. We have achieved character error rate (CER) of 7.9% which is relatively lower by 38.3% to 1.3% than the CER of the word-based models of smaller vocabularies than 162k. Our results show the possibility of developing ASR system for an Ethio-Semitic language using an existing speech and text corpora of another language in the family.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yushi Hu|AUTHOR Yushi Hu]]^^1^^, [[Shane Settle|AUTHOR Shane Settle]]^^2^^, [[Karen Livescu|AUTHOR Karen Livescu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Chicago, USA; ^^2^^TTIC, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1052–1056&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic word embeddings (AWEs) are vector representations of spoken word segments. AWEs can be learned jointly with embeddings of character sequences, to generate phonetically meaningful embeddings of written words, or acoustically grounded word embeddings (AGWEs). Such embeddings have been used to improve speech retrieval, recognition, and spoken term discovery. In this work, we extend this idea to multiple low-resource languages. We jointly train an AWE model and an AGWE model, using phonetically transcribed data from multiple languages. The pre-trained models can then be used for unseen zero-resource languages, or fine-tuned on data from low-resource languages. We also investigate distinctive features, as an alternative to phone labels, to better share cross-lingual information. We test our models on word discrimination tasks for twelve languages. When trained on eleven languages and tested on the remaining unseen language, our model outperforms traditional unsupervised approaches like dynamic time warping. After fine-tuning the pre-trained models on one hour or even ten minutes of data from a new language, performance is typically much better than training on only the target-language data. We also find that phonetic supervision improves performance over character sequences, and that distinctive feature supervision is helpful in handling unseen phones in the target language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chia-Yu Li|AUTHOR Chia-Yu Li]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1057–1061&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents our latest effort on improving Code-switching language models that suffer from data scarcity. We investigate methods to augment Code-switching training text data by artificially generating them. Concretely, we propose a cycle-consistent adversarial networks based framework to transfer monolingual text into Code-switching text, considering Code-switching as a speaking style. Our experimental results on the SEAME corpus show that utilizing artificially generated Code-switching text data improves consistently the language model as well as the automatic speech recognition performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinhui Hu|AUTHOR Xinhui Hu]], [[Qi Zhang|AUTHOR Qi Zhang]], [[Lei Yang|AUTHOR Lei Yang]], [[Binbin Gu|AUTHOR Binbin Gu]], [[Xinkang Xu|AUTHOR Xinkang Xu]]
</p><p class="cpabstractcardaffiliationlist">Hithink RoyalFlush AI Research Institute, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1062–1066&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To deal with the problem of data scarce in training language model (LM) for code-switching (CS) speech recognition, we proposed an approach to obtain augmentation texts from three different viewpoints. The first one is to enhance monolingual LM by selecting corresponding sentences for existing conversational corpora; The second one is based on replacements using syntactic constraint for a monolingual Chinese corpus, with the helps of an aligned word list obtained from a pseudo-parallel corpus, and part-of-speech (POS) of words; The third one is to use text generation based on a pointer-generator network with copy mechanism, using a real CS text data for training. All sentences from these approaches show improvement for CS LMs, and they are finally fused into an LM for CS ASR tasks.

Evaluations on LMs built by the above augmented data were conducted on two Mandarin-English CS speech sets DTANG, and SEAME. The perplexities were greatly reduced with all kinds of augmented texts, and speech recognition performances were steadily improved. The mixed word error rate (MER) of DTANG and SEAME evaluation dataset got relative reduction by 9.10% and 29.73%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinxing Li|AUTHOR Xinxing Li]], [[Edward Lin|AUTHOR Edward Lin]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1067–1071&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Punctuation prediction is a critical component for speech recognition readability and speech translation segmentation. When considering multiple language support, traditional monolingual neural network models used for punctuation prediction can be costly to manage and may not produce the best accuracy. In this paper, we investigate multilingual Long Short-Term Memory (LSTM) modeling using Byte Pair Encoding (BPE) for punctuation prediction to support 43 languages¹ across 69 countries. Our findings show a single multilingual BPE-based model can achieve similar or even better performance than separate monolingual word-based models by benefiting from shared information across different languages. On an in-domain news text test set, the multilingual model achieves on average 80.2% //F1//-score while on out-of-domain speech recognition text, it achieves 73.5% //F1//-score. We also show that the shared information can help in fine-tuning for low-resource languages as well.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takayuki Arai|AUTHOR Takayuki Arai]]
</p><p class="cpabstractcardaffiliationlist">Sophia University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1366–1370&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In 2017 and 2018, two types of vocal-tract models with physical materials were developed that resemble anatomical models and can physically produce human-like speech sounds. The 2017 model is a static-model, and its vocal-tract configuration is set to produce the vowel /a/. The 2018 model is a dynamic-model, and portions of the articulators including the top surface of the tongue are made of a gel-type material. This allows a user to manipulate the shape of the tongue and articulate different vowels and a certain set of consonants. However, the mandible of the model is fixed, making it difficult to manipulate different sounds with different jaw openings, such as high vs. low vowels. Therefore, in 2019, two types were developed by adding an additional mandible mechanism to the 2018 model. For the first type, the mandible was designed to move between the open and closed positions by creating an arc-shape rail. For the second type, the mandible moves the same trajectory with an additional support. As a result, various speech sounds with a flexible-tongue and moveable mandible can be easily produced. These models are more realistic than the anatomical models proposed in 2017 and 2018 in terms of articulatory movements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiang Fang|AUTHOR Qiang Fang]]
</p><p class="cpabstractcardaffiliationlist">CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1371–1375&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For decades, average Root Mean Square Error (RMSE) over all the articulatory channels is one of the most prevalent cost functions for training statistical models for the task of acoustic-to-articulatory inversion (AAI). One of the underlying assumptions is that the samples of all the articulatory channels used for training are balanced and play the same role in AAI. However, this is not true from speech production point view. In this study, at each time instant, each articulatory channel is classified to be critical or noncritical according to their roles in the formation of constrictions along the vocal tract when producing speech sound. It is found that the training set is dominated by the samples of noncritical articulatory channels. To deal with the unbalanced dataset problem, several Bi-LSTM networks are trained by removing the of noncritical portions of each articulatory channels if the training errors are less than some dynamic threshold. The results indicate that the average RMSE over all the articulatory channels, the average RMSE over the critical articulators, and the average RMSE over the noncritical articulators can be reduced significantly by the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1376–1380&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech production involves the movement of various articulators, including tongue, jaw, and lips. Estimating the movement of the articulators from the acoustics of speech is known as acoustic-to-articulatory inversion (AAI). Recently, it has been shown that instead of training AAI in a speaker specific manner, pooling the acoustic-articulatory data from multiple speakers is beneficial. Further, additional conditioning with speaker specific information by one-hot encoding at the input of AAI along with acoustic features benefits the AAI performance in a closed-set speaker train and test condition. In this work, we carry out an experimental study on the benefit of using x-vectors for providing speaker specific information to condition AAI. Experiments with 30 speakers have shown that the AAI performance benefits from the use of x-vectors in a closed set seen speaker condition. Further, x-vectors also generalizes well for unseen speaker evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zirui Liu|AUTHOR Zirui Liu]]^^1^^, [[Yi Xu|AUTHOR Yi Xu]]^^1^^, [[Feng-fan Hsieh|AUTHOR Feng-fan Hsieh]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College London, UK; ^^2^^National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1381–1385&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study we tested the hypothesis that consonant and vowel articulations start at the same time at syllable onset [1]. Articulatory data was collected for Mandarin Chinese using Electromagnetic Articulography (EMA), which tracks flesh-point movements in time and space. Unlike the traditional velocity threshold method [2], we used a triplet method based on the minimal pair paradigm [3] that detects divergence points between contrastive pairs of C or V respectively, before comparing their relative timing. Results show that articulatory onsets of consonant and vowel in CV syllables do not differ significantly from each other, which is consistent with the CV synchrony hypothesis. At the same time, the results also show some evidence that articulators that are shared by both C and V are engaged in sequential articulation, i.e., approaching the V target after approaching the C target.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[J^onatas Santos|AUTHOR J^onatas Santos]], [[Jugurta Montalvão|AUTHOR Jugurta Montalvão]], [[Israel Santos|AUTHOR Israel Santos]]
</p><p class="cpabstractcardaffiliationlist">UFS, Brazil</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1386–1390&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A new model for vocal folds with a polyp is proposed, based on a mass-spring-damper system and body-cover structure. The model was used to synthesize a wide variety of sustained vowels samples, with and without vocal polyps. Analytical conjectures regarding the effect of a polyp on synthesized voice signals corresponding to sustained vowels were performed. These conjectures are then used to estimate intrinsic dimension and differential entropy. These parameters were used to implement a naive classifier with the samples of the public //Saarbruecken// Voice Database, as a proof of concept. The results obtained suggests that the model presented in this paper might be a useful tool for tuning actual polyp detectors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lin Zhang|AUTHOR Lin Zhang]]^^1^^, [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]]^^1^^, [[Jianguo Wei|AUTHOR Jianguo Wei]]^^1^^, [[Seiji Adachi|AUTHOR Seiji Adachi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^Fraunhofer IBP, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1391–1395&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study attempts to describe a plausible causal mechanism of generating individual vocal characteristics in higher spectra. The lower vocal tract has been suggested to be such a causal region, but a question remains as to how this region modulates vowels’ higher spectra. Based on existing data, this study predicts that resonance of the lower vocal tract modulates higher vowel spectra into a peak-dip-peak pattern. A preliminary acoustic simulation was made to confirm that complexity of lower vocal-tract cavities generates such a pattern with the second peak. This spectral modulation pattern was further examined to see to what extent it contributes to generating static speaker characteristics. To do so, a statistical analysis of male and female F-ratio curves was conducted based on a speech database. In the result, three frequency regions for the peak-dip-peak patterns correspond to three regions in the gender-specific F-ratio curves. Thus, this study suggests that, while the first peak may be the major determinant by the human ears, the whole frequency pattern facilitates speaker recognition by machines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Renuka Mannem|AUTHOR Renuka Mannem]]^^1^^, [[Navaneetha Gaddam|AUTHOR Navaneetha Gaddam]]^^2^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^RGUKT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1396–1400&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The real-time Magnetic Resonance Imaging (rtMRI) is often used for speech production research as it captures the complete view of the vocal tract during speech. Air-tissue boundaries (ATBs) are the contours that trace the transition between high-intensity tissue region and low-intensity airway cavity region in an rtMRI video. The ATBs are used in several speech related applications. However, the ATB segmentation is a challenging task as the rtMRI frames have low resolution and low signal-to-noise ratio. Several works have been proposed in the past for ATB segmentation. Among these, the supervised algorithms have been shown to perform well compared to the unsupervised algorithms. However, the supervised algorithms have limited generalizability towards subjects not involved in training. In this work, we propose a 3-dimensional convolutional neural network (3D-CNN) which utilizes both spatial and temporal information from the rtMRI video for accurate ATB segmentation. The 3D-CNN model captures the vocal tract dynamics in an rtMRI video independent of the morphology of the subject leading to an accurate ATB segmentation for unseen subjects. In a leave-one-subject-out experimental setup, it is observed that the proposed approach provides ~32% relative improvement in the performance compared to the best (SegNet based) baseline approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tilak Purohit|AUTHOR Tilak Purohit]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1401–1405&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a technique to estimate virtual upper lip (VUL) and virtual lower lip (VLL) trajectories during production of bilabial stop consonants (/p/, /b/) and nasal (/m/). A VUL (VLL) is a hypothetical trajectory below (above) the measured UL (LL) trajectory which could have been achieved by UL (LL) if UL and LL were not in contact with each other during bilabial stops and nasal. Maximum deviation of UL from VUL and its location as well as the range of VUL are used as features, denoted by VUL MD, VUL MDL, and VUL R, respectively. Similarly, VLL MD, VLL MDL, and VLL R are also computed. Analyses of these six features are carried out for /p/, /b/, and /m/ at slow, normal and fast rates based on electromagnetic articulograph (EMA) recordings of VCV stimuli spoken by ten subjects. While no significant differences were observed among /p/, /b/, and /m/ in every rate, all six features except VLL MD were found to drop significantly from slow to fast rates. These six features were also found to perform better in an automatic classification task between slow vs fast rates compared to five baseline features computed from UL and LL comprising their ranges, velocities and minimum distance from each other.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meng Ge|AUTHOR Meng Ge]]^^1^^, [[Chenglin Xu|AUTHOR Chenglin Xu]]^^2^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^3^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^NTU, Singapore; ^^3^^NTU, Singapore; ^^4^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1406–1410&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker extraction aims to extract the target speech signal from a multi-talker environment given a target speaker’s reference speech. We recently proposed a time-domain solution, SpEx, that avoids the phase estimation in frequency-domain approaches. Unfortunately, SpEx is not fully a time-domain solution since it performs time-domain speech encoding for speaker extraction, while taking frequency-domain speaker embedding as the reference. The size of the analysis window for time-domain and the size for frequency-domain input are also different. Such mismatch has an adverse effect on the system performance. To eliminate such mismatch, we propose a complete time-domain speaker extraction solution, that is called SpEx+. Specifically, we tie the weights of two identical speech encoder networks, one for the encoder-extractor-decoder pipeline, another as part of the speaker encoder. Experiments show that the SpEx+ achieves 0.8dB and 2.1dB SDR improvement over the state-of-the-art SpEx baseline, under different and same gender conditions on WSJ0-2mix-extr database respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiahao Xu|AUTHOR Jiahao Xu]]^^1^^, [[Kun Hu|AUTHOR Kun Hu]]^^1^^, [[Chang Xu|AUTHOR Chang Xu]]^^1^^, [[Duc Chung Tran|AUTHOR Duc Chung Tran]]^^2^^, [[Zhiyong Wang|AUTHOR Zhiyong Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Sydney, Australia; ^^2^^FPT University, Vietnam</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1451–1455&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Predicting and applying Time-Frequency (T-F) masks on mixture signals have been successfully utilized for speech separation. However, existing studies have not well utilized the identity context of a speaker for the inference of masks. In this paper, we propose a novel speaker-aware monaural speech separation model. We firstly devise an encoder to disentangle speaker identity information with the supervision from the auxiliary speaker verification task. Then, we develop a spectrogram masking network to predict speaker masks, which would be applied to the mixture signal for the reconstruction of source signals. Experimental results on two WSJ0 mixed datasets demonstrate that our proposed model outperforms existing models in different separation scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tingle Li|AUTHOR Tingle Li]], [[Qingjian Lin|AUTHOR Qingjian Lin]], [[Yuanyuan Bao|AUTHOR Yuanyuan Bao]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1411–1415&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, Convolutional Neural Network (CNN) and Long short-term memory (LSTM) based models have been introduced to deep learning-based target speaker separation. In this paper, we propose an Attention-based neural network (Atss-Net) in the spectrogram domain for the task. It allows the network to compute the correlation between each feature parallelly, and using shallower layers to extract more features, compared with the CNN-LSTM architecture. Experimental results show that our Atss-Net yields better performance than the VoiceFilter, although it only contains half of the parameters. Furthermore, our proposed model also demonstrates promising performance in speech enhancement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Leyuan Qu|AUTHOR Leyuan Qu]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1416–1420&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Target speech separation refers to isolating target speech from a multi-speaker mixture signal by conditioning on auxiliary information about the target speaker. Different from the mainstream audio-visual approaches which usually require simultaneous visual streams as additional input, e.g. the corresponding lip movement sequences, in our approach we propose the novel use of a single face profile of the target speaker to separate expected clean speech. We exploit the fact that the image of a face contains information about the person’s speech sound. Compared to using a simultaneous visual sequence, a face image is easier to obtain by pre-enrollment or on websites, which enables the system to generalize to devices without cameras. To this end, we incorporate face embeddings extracted from a pre-trained model for face recognition into the speech separation, which guide the system in predicting a target speaker mask in the time-frequency domain. The experimental results show that a pre-enrolled face image is able to benefit separating expected speech signals. Additionally, face information is complementary to voice reference and we show that further improvement can be achieved when combining both face and voice embeddings¹.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zining Zhang|AUTHOR Zining Zhang]]^^1^^, [[Bingsheng He|AUTHOR Bingsheng He]]^^2^^, [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^YITU Technology, Singapore; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1421–1425&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Extracting the speech of a target speaker from mixed audios, based on a reference speech from the target speaker, is a challenging yet powerful technology in speech processing. Recent studies of speaker-independent speech separation, such as TasNet, have shown promising results by applying deep neural networks over the time-domain waveform. Such separation neural network does not directly generate reliable and accurate output when target speakers are specified, because of the necessary prior on the number of speakers and the lack of robustness when dealing with audios with absent speakers. In this paper, we break these limitations by introducing a new speaker-aware speech masking method, called X-TaSNet. Our proposal adopts new strategies, including a distortion-based loss and corresponding alternating training scheme, to better address the robustness issue. X-TaSNet significantly enhances the extracted speech quality, doubling SDRi and SI-SNRi of the output speech audio over state-of-the-art voice filtering approach. X-TaSNet also improves the reliability of the results by improving the accuracy of speaker identity in the output audio to 95.4%, such that it returns silent audios in most cases when the target speaker is absent. These results demonstrate X-TaSNet moves one solid step towards more practical applications of speaker extraction technology.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chenda Li|AUTHOR Chenda Li]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1426–1430&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Solving the cocktail party problem with the multi-modal approach has become popular in recent years. Humans can focus on the speech that they are interested in for the multi-talker mixed speech, by hearing the mixed speech, watching the speaker, and understanding the context what the speaker is talking about. In this paper, we try to solve the speaker-independent speech separation problem with all three audio-visual-contextual modalities at the first time, and those are hearing speech, watching speaker and understanding contextual language. Compared to the previous methods applying pure audio modal or audio-visual modals, a specific model is further designed to extract contextual language information for all target speakers directly from the speech mixture. Then these extracted contextual knowledge are further incorporated into the multi-modal based speech separation architecture with an appropriate attention mechanism. The experiments show that a significant performance improvement can be observed with the newly proposed audio-visual-contextual speech separation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yunzhe Hao|AUTHOR Yunzhe Hao]]^^1^^, [[Jiaming Xu|AUTHOR Jiaming Xu]]^^1^^, [[Jing Shi|AUTHOR Jing Shi]]^^1^^, [[Peng Zhang|AUTHOR Peng Zhang]]^^1^^, [[Lei Qin|AUTHOR Lei Qin]]^^2^^, [[Bo Xu|AUTHOR Bo Xu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1431–1435&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition technology in single-talker scenes has matured in recent years. However, in noisy environments, especially in multi-talker scenes, speech recognition performance is significantly reduced. Towards cocktail party problem, we propose a unified time-domain target speaker extraction framework. In this framework, we obtain a voiceprint from a clean speech of the target speaker and then extract the speech of the same speaker in a mixed speech based on the previously obtained voiceprint. This framework uses voiceprint information to avoid permutation problems. In addition, a time-domain model can avoid the phase reconstruction problem of traditional time-frequency domain models. Our framework is suitable for scenes where people are relatively fixed and their voiceprints are easily registered, such as in a car, home, meeting room, or other such scenes. The proposed global model based on the dual-path recurrent neural network (DPRNN) block achieved state-of-the-art under speaker extraction tasks on the WSJ0-2mix dataset. We also built corresponding low-latency models. Results showed comparable model performance and a much shorter upper limit latency than time-frequency domain models. We found that performance of the low-latency model gradually decreased as latency decreased, which is important when deploying models in actual application scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jianshu Zhao|AUTHOR Jianshu Zhao]], [[Shengzhou Gao|AUTHOR Shengzhou Gao]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]
</p><p class="cpabstractcardaffiliationlist">Tokyo Tech, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1436–1440&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Target-speaker speech separation, due to its essence in industrial applications, has been heavily researched for long by many. The key metric for qualifying a good separation algorithm still lies on the separation performance, i.e., the quality of the separated voice. In this paper, we presented a novel high-performance time-domain waveform based target-speaker speech separation architecture (WaveFilter) for this task. Unlike most previous researches which adopted Time-Frequency based approaches, WaveFilter does the job by applying Convolutional Neural Network (CNN) based feature extractors directly on the raw Time-domain audio data, for both the speech separation network and the auxiliary target-speaker feature extraction network. We achieved a 10.46 Signal to Noise Ratio (SNR) improvement on the WSJ0 2-mix dataset and a 10.44 SNR improvement on the Librispeech dataset as our final results, which is much higher than the existing approaches. Our method also achieved an 4.9 SNR improvement on the WSJ0 3-mix data. This proves the feasibility of WaveFilter on separating the target-speaker’s voice from multi-speaker voice mixtures without knowing the exact number of speakers in advance, which in turn proves the readiness of our method for real-world applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Hiroaki Ito|AUTHOR Hiroaki Ito]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Shoko Araki|AUTHOR Shoko Araki]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1441–1445&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Being able to control the acoustic events (AEs) to which we want to listen would allow the development of more controllable hearable devices. This paper addresses the AE sound selection (or removal) problems, that we define as the extraction (or suppression) of all the sounds that belong to one or multiple desired AE classes. Although this problem could be addressed with a combination of source separation followed by AE classification, this is a sub-optimal way of solving the problem. Moreover, source separation usually requires knowing the maximum number of sources, which may not be practical when dealing with AEs. In this paper, we propose instead a universal sound selection neural network that enables to directly select AE sounds from a mixture given user-specified target AE classes. The proposed framework can be explicitly optimized to simultaneously select sounds from multiple desired AE classes, independently of the number of sources in the mixture. We experimentally show that the proposed method achieves promising AE sound selection performance and could be generalized to mixtures with a number of sources that are unseen during training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masahiro Yasuda|AUTHOR Masahiro Yasuda]], [[Yasunori Ohishi|AUTHOR Yasunori Ohishi]], [[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Noboru Harada|AUTHOR Noboru Harada]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1446–1450&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advancements in representation learning enable cross-modal retrieval by modeling an audio-visual co-occurrence in a single aspect, such as physical and linguistic. Unfortunately, in real-world media data, since co-occurrences in various aspects are complexly mixed, it is difficult to distinguish a specific target co-occurrence from many other non-target co-occurrences, resulting in failure in crossmodal retrieval. To overcome this problem, we propose a triplet-loss-based representation learning method that incorporates an awareness mechanism. We adopt weakly-supervised event detection, which provides a constraint in representation learning so that our method can “be aware” of a specific target audio-visual co-occurrence and discriminate it from other non-target co-occurrences. We evaluated the performance of our method by applying it to a sound effect retrieval task using recorded TV broadcast data. In the task, a sound effect appropriate for a given video input should be retrieved. We then conducted objective and subjective evaluations, the results indicating that the proposed method produces significantly better associations of sound and visual effects than baselines with no awareness mechanism.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Patrick von Platen|AUTHOR Patrick von Platen]]^^1^^, [[Fei Tao|AUTHOR Fei Tao]]^^2^^, [[Gokhan Tur|AUTHOR Gokhan Tur]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Uber, France; ^^2^^Uber, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1076–1080&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speaker verification systems are vulnerable to audio replay attacks which bypass security by replaying recordings of authorized speakers. Replay attack detection (RA) systems built upon Residual Neural Networks (ResNet)s have yielded astonishing results on the public benchmark ASVspoof 2019 Physical Access challenge. With most teams using fine-tuned feature extraction pipelines and model architectures, the generalizability of such systems remains questionable though. In this work, we analyse the effect of discriminative feature learning in a multi-task learning (MTL) setting can have on the generalizability and discriminability of RA detection systems. We use a popular ResNet architecture optimized by the cross-entropy criterion as our baseline and compare it to the same architecture optimized by MTL using Siamese Neural Networks (SNN). It can be shown that 26.8% relative improvement on Equal Error Rate (EER) is obtained by leveraging SNN.We further enhance the model’s architecture and demonstrate that SNN with additional reconstruction loss yield another significant improvement of relative 13.8% EER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kosuke Akimoto|AUTHOR Kosuke Akimoto]]^^1^^, [[Seng Pei Liew|AUTHOR Seng Pei Liew]]^^2^^, [[Sakiko Mishima|AUTHOR Sakiko Mishima]]^^1^^, [[Ryo Mizushima|AUTHOR Ryo Mizushima]]^^1^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NEC, Japan; ^^2^^LINE, Japan; ^^3^^A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1081–1085&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a new database of voice recordings with the goal of promoting research on protection of automatic speaker verification systems from voice spoofing, such as replay attacks. Specifically, we focus on the liveness feature of live speech, i.e., pop noise, and the corresponding voice recordings without this feature, for the purpose of combating spoofing via liveness detection. Our database includes simultaneous recordings using a microphone array, as well as recordings at various distances and positions. To the best of our knowledge, this is the first publicly available database that has been particularly designed to study the liveness features of voice recordings under various conditions.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1086–1090&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite tremendous progress in speaker verification recently, replay spoofing attacks are still a major threat to these systems. Focusing on dataset-specific scenarios, anti-spoofing systems have achieved promising in-domain performance at the cost of poor generalization towards unseen out-of-domain datasets. This is treated as a domain mismatch problem with a domain adversarial training (DAT) framework, which has previously been applied to enhance generalization. However, since only one domain discriminator is adopted, DAT suffers from the false alignment of cross-domain spoofed and genuine pairs, thus failing to acquire a strong spoofing-discriminative capability. In this work, we propose the dual-adversarial domain adaptation (DADA) framework to enable fine-grained alignment of spoofed and genuine data separately by using two domain discriminators, which effectively alleviates the above problem and further improves spoofing detection performance. Experiments on the ASVspoof 2017 V.2 dataset and the physical access portion of BTAS 2016 dataset demonstrate that the proposed DADA framework significantly outperforms the baseline model and DAT framework in cross-domain evaluation scenarios. It is shown that the newly proposed DADA architecture is more robust and effective for generalized replay attack detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hye-jin Shim|AUTHOR Hye-jin Shim]]^^1^^, [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]]^^2^^, [[Jee-weon Jung|AUTHOR Jee-weon Jung]]^^1^^, [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Seoul, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1091–1095&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Constructing a dataset for replay spoofing detection requires a physical process of playing an utterance and re-recording it, presenting a challenge to the collection of large-scale datasets. In this study, we propose a self-supervised framework for pre-training acoustic configurations using datasets published for other tasks, such as speaker verification. Here, acoustic configurations refer to the environmental factors generated during the process of voice recording but not the voice itself, including microphone types, place and ambient noise levels. Specifically, we select pairs of segments from utterances and train deep neural networks to determine whether the acoustic configurations of the two segments are identical. We validate the effectiveness of the proposed method based on the ASVspoof 2019 physical access dataset utilizing two well-performing systems. The experimental results demonstrate that the proposed method outperforms the baseline approach by 30%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhijith G.|AUTHOR Abhijith G.]], [[Adharsh S.|AUTHOR Adharsh S.]], [[Akshay P. L.|AUTHOR Akshay P. L.]], [[Rajeev Rajan|AUTHOR Rajeev Rajan]]
</p><p class="cpabstractcardaffiliationlist">College of Engineering Trivandrum, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1096–1100&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The fusion of i-vector with prosodic features is used to identify the most competent voice imitator through a deep neural network framework (DNN) in this paper. This experiment is conducted by analyzing the spectral and prosodic characteristics during voice imitation. Spectral features include mel-frequency cepstral features (MFCC) and modified group delay features (MODGDF). Prosodic features, computed by the Legendre polynomial approximation, are used as complementary information to the i-vector model. Proposed system evaluates the competence of artists in voice mimicking and ranks them according to the scores from a classifier based on mean opinion score (MOS). If the artist with the highest MOS is identified as rank-1 by the proposed system, a hit occurs. DNN-based classifier makes the decision based on the probability value on the nodes at the output layer. The performance is evaluated using top X-hit criteria on a mimicry dataset. Top-2 hit rate of 81.81% is obtained for fusion experiment. The experiments demonstrate the potential of i-vector framework and its fusion in competency evaluation of voice mimicking.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenzong Wu|AUTHOR Zhenzong Wu]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1101–1105&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern text-to-speech (TTS) and voice conversion (VC) systems produce natural sounding speech that questions the security of automatic speaker verification (ASV). This makes detection of such synthetic speech very important to safeguard ASV systems from unauthorized access. Most of the existing spoofing countermeasures perform well when the nature of the attacks is made known to the system during training. However, their performance degrades in face of unseen nature of attacks. In comparison to the synthetic speech created by a wide range of TTS and VC methods, genuine speech has a more consistent distribution. We believe that the difference between the distribution of synthetic and genuine speech is an important discriminative feature between the two classes. In this regard, we propose a novel method referred to as feature genuinization that learns a transformer with convolutional neural network (CNN) using the characteristics of only genuine speech. We then use this genuinization transformer with a light CNN classifier. The ASVspoof 2019 logical access corpus is used to evaluate the proposed method. The studies show that the proposed feature genuinization based LCNN system outperforms other state-of-the-art spoofing countermeasures, depicting its effectiveness for detection of synthetic speech attacks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hemlata Tak|AUTHOR Hemlata Tak]], [[Jose Patino|AUTHOR Jose Patino]], [[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Nicholas Evans|AUTHOR Nicholas Evans]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]
</p><p class="cpabstractcardaffiliationlist">EURECOM, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1106–1110&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The threat of spoofing can pose a risk to the reliability of automatic speaker verification. Results from the biannual ASVspoof evaluations show that effective countermeasures demand front-ends designed specifically for the detection of spoofing artefacts. Given the diversity in spoofing attacks, ensemble methods are particularly effective. The work in this paper shows that a bank of very simple classifiers, each with a front-end tuned to the detection of different spoofing attacks and combined at the score level through non-linear fusion, can deliver superior performance than more sophisticated ensemble solutions that rely upon complex neural network architectures. Our comparatively simple approach outperforms all but 2 of the 48 systems submitted to the logical access condition of the most recent ASVspoof 2019 challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prasanth Parasu|AUTHOR Prasanth Parasu]], [[Julien Epps|AUTHOR Julien Epps]], [[Kaavya Sriskandaraja|AUTHOR Kaavya Sriskandaraja]], [[Gajan Suthokumar|AUTHOR Gajan Suthokumar]]
</p><p class="cpabstractcardaffiliationlist">UNSW Sydney, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1111–1115&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current approaches to Voice Presentation Attack (VPA) detection have largely focused on spoofing detection within a single database and/or attack type. However, for practical Presentation Attack Detection (PAD) systems to be adopted by industry, they must be able to generalise to detect diverse and previously unseen VPAs. Inspired by successful aspects of deep learning systems for image classification such as the introduction of residual mappings through shortcut connections, this paper proposes a novel Light-ResNet architecture that provides good generalisation across databases and attack types. The introduction of skip connections within residual modules enables the training of deeper spoofing classifiers that can leverage more useful discriminative information learned in the hidden layers, while still generalising well under mismatched conditions. Utilising the wide variety of databases available for VPA research, this paper also proposes a set of generalisation evaluations which a practical PAD system should be able to meet: generalising within a database, generalising across databases within attack type and generalising across all spoofing classes. Evaluations on the ASVspoof 2015, BTAS 2016 (replay) and ASVspoof 2017 V2.0 databases show that the proposed Light-ResNet architecture is able to generalise across these diverse tasks consistently, outperforming CQCC-GMM and Attentive Filtering Network (AFN) baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenchun Lei|AUTHOR Zhenchun Lei]], [[Yingen Yang|AUTHOR Yingen Yang]], [[Changhong Liu|AUTHOR Changhong Liu]], [[Jihua Ye|AUTHOR Jihua Ye]]
</p><p class="cpabstractcardaffiliationlist">Jiangxi Normal University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1116–1120&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The security and reliability of automatic speaker verification systems can be threatened by different types of spoofing attacks using speech synthetic, voice conversion, or replay. The 2-class Gaussian Mixture Model classifier for genuine and spoofed speech is usually used as the baseline in the ASVspoof challenge, which is designed to develop the generalized countermeasures with potential to detect varying and unforeseen spoofing attacks. In the scoring phase, the GMM accumulates the scores on all frames in a test speech independently, and does not consider the relationship between adjacent frames. We propose the 1-D Convolutional Neural Network whose input is the log-probabilities of the speech frames on the GMM components. The new model considers not only the score distribution of GMM components, but also the local relationship of frames. And the pooling is used to extract the speech global character. The Siamese CNN is also proposed, which is based on two GMMs trained on genuine and spoofed speech respectively. Experiments on the ASVspoof 2019 challenge logical and physical access scenarios show that the proposed model can improve performance greatly compared with the baseline systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[H. Schröter|AUTHOR H. Schröter]]^^1^^, [[T. Rosenkranz|AUTHOR T. Rosenkranz]]^^2^^, [[A.N. Escalante-B.|AUTHOR A.N. Escalante-B.]]^^2^^, [[P. Zobel|AUTHOR P. Zobel]]^^1^^, [[Andreas Maier|AUTHOR Andreas Maier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^Sivantos, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1121–1125&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep-learning based noise reduction algorithms have proven their success especially for non-stationary noises, which makes it desirable to also use them for embedded devices like hearing aids (HAs). This, however, is currently not possible with state-of-the-art methods. They either require a lot of parameters and computational power and thus are only feasible using modern CPUs. Or they are not suitable for online processing, which requires constraints like low-latency by the filter bank and the algorithm itself.

In this work, we propose a mask-based noise reduction approach. Using hierarchical recurrent neural networks, we are able to drastically reduce the number of neurons per layer while including temporal context via hierarchical connections. This allows us to optimize our model towards a minimum number of parameters and floating-point operations (FLOPs), while preserving noise reduction quality compared to previous work. Our smallest network contains only 5k parameters, which makes this algorithm applicable on embedded devices. We evaluate our model on a mixture of EUROM and a real-world noise database and report objective metrics on unseen noise.</p></div>
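
As a rough illustration of mask-based noise reduction at inference time (independent of the hierarchical RNN proposed here), the sketch below applies a predicted gain mask to a noisy magnitude spectrogram and reuses the noisy phase; the shapes and the random "predicted" mask are placeholders for a real network output.

```python
# Minimal sketch: applying a predicted time-frequency mask to a noisy STFT.
import numpy as np

def apply_mask(noisy_stft, mask):
    """noisy_stft: complex (T, F); mask: real gains in [0, 1] of shape (T, F)."""
    enhanced_mag = mask * np.abs(noisy_stft)
    return enhanced_mag * np.exp(1j * np.angle(noisy_stft))   # keep noisy phase

rng = np.random.default_rng(0)
T, F = 100, 257
noisy = rng.standard_normal((T, F)) + 1j * rng.standard_normal((T, F))
predicted_mask = rng.uniform(0.0, 1.0, size=(T, F))   # stand-in for the RNN output
enhanced = apply_mask(noisy, predicted_mask)
print(enhanced.shape, enhanced.dtype)
```
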
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Viet Anh Trinh|AUTHOR Viet Anh Trinh]], [[Michael I. Mandel|AUTHOR Michael I. Mandel]]
</p><p class="cpabstractcardaffiliationlist">CUNY Graduate Center, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1166–1170&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a metric that we call the structured saliency benchmark (SSBM) to evaluate importance maps computed for automatic speech recognizers on individual utterances. These maps indicate time-frequency points of the utterance that are most important for correct recognition of a target word. Our evaluation technique is not only suitable for standard classification tasks, but is also appropriate for structured prediction tasks like sequence-to-sequence models. Additionally, we use this approach to perform a comparison of the importance maps created by our previously introduced technique using “bubble noise” to identify important points through correlation with a baseline approach based on smoothed speech energy and forced alignment. Our results show that the bubble analysis approach is better at identifying important speech regions than this baseline on 100 sentences from the AMI corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marco Tagliasacchi|AUTHOR Marco Tagliasacchi]], [[Yunpeng Li|AUTHOR Yunpeng Li]], [[Karolis Misiunas|AUTHOR Karolis Misiunas]], [[Dominik Roblek|AUTHOR Dominik Roblek]]
</p><p class="cpabstractcardaffiliationlist">Google, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1126–1130&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We explore the possibility of leveraging accelerometer data to perform speech enhancement in very noisy conditions. Although it is possible to only partially reconstruct user’s speech from the accelerometer, the latter provides a strong conditioning signal that is not influenced from noise sources in the environment. Based on this observation, we feed a multi-modal input to SEANet (Sound EnhAncement Network), a wave-to-wave fully convolutional model, which adopts a combination of feature losses and adversarial losses to reconstruct an enhanced version of user’s speech. We trained our model with data collected by sensors mounted on an earbud and synthetically corrupted by adding different kinds of noise sources to the audio signal. Our experimental results demonstrate that it is possible to achieve very high quality results, even in the case of interfering speech at the same level of loudness. A sample of the output produced by our model is available at https://google-research.github.io/seanet/multimodal/speech</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shang-Yi Chuang|AUTHOR Shang-Yi Chuang]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Chen-Chou Lo|AUTHOR Chen-Chou Lo]]^^2^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica; ^^2^^KU Leuven, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1131–1135&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous studies have confirmed the effectiveness of incorporating visual information into speech enhancement (SE) systems. Despite improved denoising performance, two problems may be encountered when implementing an audio-visual SE (AVSE) system: (1) additional processing costs are incurred to incorporate visual input and (2) the use of face or lip images may cause privacy problems. In this study, we propose a Lite AVSE (LAVSE) system to address these problems. The system includes two visual data compression techniques and removes the visual feature extraction network from the training model, yielding better online computation efficiency. Our experimental results indicate that the proposed LAVSE system can provide notably better performance than an audio-only SE system with a similar number of model parameters. In addition, the experimental results confirm the effectiveness of the two techniques for visual data compression.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christian Bergler|AUTHOR Christian Bergler]]^^1^^, [[Manuel Schmitt|AUTHOR Manuel Schmitt]]^^1^^, [[Andreas Maier|AUTHOR Andreas Maier]]^^1^^, [[Simeon Smeele|AUTHOR Simeon Smeele]]^^2^^, [[Volker Barth|AUTHOR Volker Barth]]^^3^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^MPI of Animal Behavior, Germany; ^^3^^Anthro-Media, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1136–1140&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In bioacoustics, passive acoustic monitoring of animals living in the wild, both on land and underwater, leads to large data archives characterized by a strong imbalance between recorded animal sounds and ambient noises. Bioacoustic datasets suffer extremely from such large noise-variety, caused by a multitude of external influences and changing environmental conditions over years. This leads to significant deficiencies/problems concerning the analysis and interpretation of animal vocalizations by biologists and machine-learning algorithms. To counteract such huge noise diversity, it is essential to develop a denoising procedure enabling automated, efficient, and robust data enhancement. However, a fundamental problem is the lack of clean/denoised ground-truth samples. The current work is the first presenting a fully-automated deep denoising approach for bioacoustics, not requiring any clean ground-truth, together with one of the largest data archives recorded on killer whales (//Orcinus Orca//) — the Orchive. Therefore, an approach, originally developed for image restoration, known as Noise2Noise (N2N), was transferred to the field of bioacoustics, and extended by using automatic machine-generated binary masks as additional network attention mechanism. Besides a significant cross-domain signal enhancement, our previous results regarding supervised orca/noise segmentation and orca call type identification were outperformed by applying ORCA-CLEAN as additional data preprocessing/enhancement step.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Zhang|AUTHOR Hao Zhang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1141–1145&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We formulate active noise control (ANC) as a supervised learning problem and propose a deep learning approach, called deep ANC, to address the nonlinear ANC problem. A convolutional recurrent network (CRN) is trained to estimate the real and imaginary spectrograms of the canceling signal from the reference signal so that the corresponding anti-noise can eliminate or attenuate the primary noise in the ANC system. Large-scale multi-condition training is employed to achieve good generalization and robustness against a variety of noises. The deep ANC method can be trained to achieve active noise cancellation no matter whether the reference signal is noise or noisy speech. In addition, a delay-compensated strategy is introduced to address the potential latency problem of ANC systems. Experimental results show that the proposed method is effective for wide-band noise reduction and generalizes well to untrained noises. Moreover, the proposed method can be trained to achieve ANC within a quiet zone.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tuan Dinh|AUTHOR Tuan Dinh]]^^1^^, [[Alexander Kain|AUTHOR Alexander Kain]]^^1^^, [[Kris Tjaden|AUTHOR Kris Tjaden]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Oregon Health & Science University, USA; ^^2^^SUNY Buffalo, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1146–1150&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Increasing speech intelligibility for hearing-impaired listeners and normal-hearing listeners in noisy environments remains a challenging problem. Spectral style conversion from habitual to clear speech is a promising approach to address the problem. Motivated by the success of generative adversarial networks (GANs) in various applications of image and speech processing, we explore the potential of conditional GANs (cGANs) to learn the mapping from habitual speech to clear speech. We evaluated the performance of cGANs in three tasks: 1) speaker-dependent one-to-one mappings, 2) speaker-independent many-to-one mappings, and 3) speaker-independent many-to-many mappings. In the first task, cGANs outperformed a traditional deep neural network mapping in terms of average keyword recall accuracy and the number of speakers with improved intelligibility. In the second task, we significantly improved intelligibility of one of three speakers, without any source speaker training data. In the third and most challenging task, we improved keyword recall accuracy for two of three speakers, but without statistical significance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mathias B. Pedersen|AUTHOR Mathias B. Pedersen]]^^1^^, [[Morten Kolbæk|AUTHOR Morten Kolbæk]]^^1^^, [[Asger H. Andersen|AUTHOR Asger H. Andersen]]^^2^^, [[Søren H. Jensen|AUTHOR Søren H. Jensen]]^^1^^, [[Jesper Jensen|AUTHOR Jesper Jensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalborg University, Denmark; ^^2^^Oticon, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1151–1155&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Data-driven speech intelligibility prediction has been slow to take off. Datasets of measured speech intelligibility are scarce, and so current models are relatively small and rely on hand-picked features. Classical predictors based on psychoacoustic models and heuristics are still the state-of-the-art. This work proposes a U-Net inspired fully convolutional neural network architecture, NSIP, trained and tested on ten datasets to predict intelligibility of time-domain speech. The architecture is compared to a frequency domain data-driven predictor and to the classical state-of-the-art predictors STOI, ESTOI, HASPI and SIIB. The performance of NSIP is found to be superior for datasets seen in the training phase. On unseen datasets NSIP reaches performance comparable to classical predictors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kenichi Arai|AUTHOR Kenichi Arai]]^^1^^, [[Shoko Araki|AUTHOR Shoko Araki]]^^1^^, [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]]^^1^^, [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]^^1^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^1^^, [[Toshio Irino|AUTHOR Toshio Irino]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^Wakayama University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1156–1160&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The measurement of speech intelligibility (SI) still mainly relies on time-consuming and expensive subjective experiments because no versatile objective measure can predict SI. One promising candidate of an SI prediction method is an approach with a deep neural network (DNN)-based automatic speech recognition (ASR) system, due to its recent great advance. In this paper, we propose and evaluate SI prediction methods based on the posteriors of DNN-based ASR systems. Posteriors, which are the probabilities of phones given acoustic features, are derived using forced alignments between clean speech and a phone sequence. We evaluated some variations of the posteriors to improve the prediction performance. As a result of our experiments, a prediction method using a squared cumulative posterior probability achieved better accuracy than the conventional SI predictors based on well-established objective measures (STOI and eSTOI).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ali Abavisani|AUTHOR Ali Abavisani]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1161–1165&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this article, we provide a model to estimate a real-valued measure of the intelligibility of individual speech segments. We trained regression models based on Convolutional Neural Networks (CNN) for stop consonants /p,t,k,b,d,ɡ/ associated with vowel /ɑ/, to estimate the corresponding Signal to Noise Ratio (SNR) at which the Consonant-Vowel (CV) sound becomes intelligible for Normal Hearing (NH) ears. The intelligibility measure for each sound is called SNR,,90,,, and is defined to be the SNR level at which human participants are able to recognize the consonant at least 90% correctly, on average, as determined in prior experiments with NH subjects. Performance of the CNN is compared to a baseline prediction based on automatic speech recognition (ASR), specifically, a constant offset subtracted from the SNR at which the ASR becomes capable of correctly labeling the consonant. Compared to baseline, our models were able to accurately estimate the SNR,,90,, intelligibility measure with less than 2 [dB²] Mean Squared Error (MSE) on average, while the baseline ASR-defined measure computes SNR,,90,, with a variance of 5.2 to 26.6 [dB²], depending on the consonant.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jixiang Li|AUTHOR Jixiang Li]], [[Chuming Liang|AUTHOR Chuming Liang]], [[Bo Zhang|AUTHOR Bo Zhang]], [[Zhao Wang|AUTHOR Zhao Wang]], [[Fei Xiang|AUTHOR Fei Xiang]], [[Xiangxiang Chu|AUTHOR Xiangxiang Chu]]
</p><p class="cpabstractcardaffiliationlist">Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1171–1175&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional neural networks are widely adopted in Acoustic Scene Classification (ASC) tasks, but they generally carry a heavy computational burden. In this work, we propose a high-performance yet lightweight baseline network inspired by MobileNetV2, which replaces square convolutional kernels with unidirectional ones to extract features alternately in temporal and frequency dimensions. Furthermore, we explore a dynamic architecture space built on the basis of the proposed baseline with the recent Neural Architecture Search (NAS) paradigm, which first train a supernet that incorporates all candidate architectures and then apply a well-known evolutionary algorithm NSGA-II to discover more efficient networks with higher accuracy and lower computational cost from the supernet. Experimental results demonstrate that our searched network is competent in ASC tasks, which achieves 90.3% F1-score on the DCASE2018 task 5 evaluation set, marking a new state-of-the-art performance while saving 25% of FLOPs compared to our baseline network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zuzanna Kwiatkowska|AUTHOR Zuzanna Kwiatkowska]], [[Beniamin Kalinowski|AUTHOR Beniamin Kalinowski]], [[Michał Kośmider|AUTHOR Michał Kośmider]], [[Krzysztof Rykaczewski|AUTHOR Krzysztof Rykaczewski]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1216–1220&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we compare the performance of three selected techniques in open set acoustic scenes classification (ASC). We test thresholding of the softmax output of a deep network classifier, which is the most popular technique nowadays employed in ASC. Further we compare the results with the Openmax classifier which is derived from the computer vision field. As the third model, we use the Adapted Class-Conditioned Autoencoder (Adapted C2AE) which is our variation of another computer vision related technique called C2AE. Adapted C2AE encompasses a more fair comparison of the given experiments and simplifies the original inference procedure, making it more applicable in the real-life scenarios. We also analyse two training scenarios: without additional knowledge of unknown classes and another where a limited subset of examples from the unknown classes is available. We find that the C2AE based method outperforms the thresholding and Openmax, obtaining 85.5% Area Under the Receiver Operating Characteristic curve (AUROC) and 66% of open set accuracy on data used in Detection and Classification of Acoustic Scenes and Events Challenge 2019 Task 1C.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1176–1180&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic scene classification systems using deep neural networks classify given recordings into pre-defined classes. In this study, we propose a novel scheme for acoustic scene classification which adopts an audio tagging system inspired by the human perception mechanism. When humans identify an acoustic scene, the existence of different sound events provides discriminative information which affects the judgement. The proposed framework mimics this mechanism using various approaches. Firstly, we employ three methods to concatenate tag vectors extracted using an audio tagging system with an intermediate hidden layer of an acoustic scene classification system. We also explore the multi-head attention on the feature map of an acoustic scene classification system using tag vectors. Experiments conducted on the detection and classification of acoustic scenes and events 2019 task 1-a dataset demonstrate the effectiveness of the proposed scheme. Concatenation and multi-head attention show a classification accuracy of 75.66% and 76.58%, respectively, compared to 73.63% accuracy of the baseline. The system with the proposed two approaches combined demonstrates an accuracy of 76.75%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liwen Zhang|AUTHOR Liwen Zhang]]^^1^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^1^^, [[Ziqiang Shi|AUTHOR Ziqiang Shi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harbin Institute of Technology, China; ^^2^^Fujitsu, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1181–1185&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional Neural Networks (CNNs) have been widely investigated on Acoustic Scene Classification (ASC). Where the convolutional operation can extract useful semantic contents from a local receptive field in the input spectrogram within certain Manhattan distance, i.e., the kernel size. Although stacking multiple convolution layers can increase the range of the receptive field, without explicitly considering the temporal relations of different receptive fields, the increased range is limited around the kernel. In this paper, we propose a 3D CNN for ASC, named ATReSN-Net, which can capture temporal relations of different receptive fields from arbitrary time-frequency locations by mapping the semantic features obtained from the residual block into a semantic space. The ATReSN module has two primary components: first, a k-NN-based grouper for gathering a semantic neighborhood for each feature point in the feature maps. Second, an attentive pooling-based temporal relations aggregator for generating the temporal relations embedding of each feature point and its neighborhood. Experiments showed that our ATReSN-Net outperforms most of the state-of-the-art CNN models. We shared our code at ATReSN-Net.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jivitesh Sharma|AUTHOR Jivitesh Sharma]], [[Ole-Christoffer Granmo|AUTHOR Ole-Christoffer Granmo]], [[Morten Goodwin|AUTHOR Morten Goodwin]]
</p><p class="cpabstractcardaffiliationlist">University of Agder, Norway</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1186–1190&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2020/MEDIA/1303" class="externallinkbutton" target="_blank">{{$:/causal/ZIP Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a model for the Environment Sound Classification Task (ESC) that consists of multiple feature channels given as input to a Deep Convolutional Neural Network (CNN) with Attention mechanism. The novelty of the paper lies in using multiple feature channels consisting of Mel-Frequency Cepstral Coefficients (MFCC), Gammatone Frequency Cepstral Coefficients (GFCC), the Constant Q-transform (CQT) and Chromagram. And, we employ a deeper CNN (DCNN) compared to previous models, consisting of spatially separable convolutions working on time and feature domain separately. Alongside, we use attention modules that perform channel and spatial attention together. We use the mix-up data augmentation technique to further boost performance. Our model is able to achieve state-of-the-art performance on three benchmark environment sound classification datasets, i.e. the UrbanSound8K (97.52%), ESC-10 (94.75%) and ESC-50 (87.45%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weimin Wang|AUTHOR Weimin Wang]]^^1^^, [[Weiran Wang|AUTHOR Weiran Wang]]^^2^^, [[Ming Sun|AUTHOR Ming Sun]]^^1^^, [[Chao Wang|AUTHOR Chao Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Salesforce, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1191–1195&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic Scene Classification (ASC) is a challenging task, as a single scene may involve multiple events that contain complex sound patterns. For example, a cooking scene may contain several sound sources including silverware clinking, chopping, frying, etc. What complicates ASC more is that classes of different activities could have overlapping sounds patterns (e.g. both cooking and dishwashing could have silverware clinking sound). In this paper, we propose a multi-head attention network to model the complex temporal input structures for ASC. The proposed network takes the audio’s time-frequency representation as input, and it leverages standard VGG plus LSTM layers to extract high-level feature representation. Further more, it applies multiple attention heads to summarize various patterns of sound events into fixed dimensional representation, for the purpose of final scene classification. The whole network is trained in an end-to-end fashion with backpropagation. Experimental results confirm that our model discovers meaningful sound patterns through the attention mechanism, without using explicit supervision in the alignment. We evaluated our proposed model using DCASE 2018 Task 5 dataset, and achieved competitive performance on par with previous winner’s results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hu Hu|AUTHOR Hu Hu]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^2^^, [[Yannan Wang|AUTHOR Yannan Wang]]^^3^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Georgia Tech, USA; ^^2^^Università di Enna “Kore”, Italy; ^^3^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1196–1200&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a domain adaptation framework to address the device mismatch issue in acoustic scene classification leveraging upon neural label embedding (NLE) and relational teacher student learning (RTSL). Taking into account the structural relationships between acoustic scene classes, our proposed framework captures such relationships which are intrinsically device-independent. In the training stage, transferable knowledge is condensed in NLE from the source domain. Next in the adaptation stage, a novel RTSL strategy is adopted to learn adapted target models without using paired source-target data often required in conventional teacher student learning. The proposed framework is evaluated on the DCASE 2018 Task1b data set. Experimental results based on AlexNet-L deep classification models confirm the effectiveness of our proposed approach for mismatch situations. NLE-alone adaptation compares favourably with the conventional device adaptation and teacher student based adaptation techniques. NLE with RTSL further improves the classification accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hu Hu|AUTHOR Hu Hu]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^2^^, [[Yannan Wang|AUTHOR Yannan Wang]]^^3^^, [[Xue Bai|AUTHOR Xue Bai]]^^4^^, [[Jun Du|AUTHOR Jun Du]]^^4^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Georgia Tech, USA; ^^2^^Università di Enna “Kore”, Italy; ^^3^^Tencent, China; ^^4^^USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1201–1205&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a sub-utterance unit selection framework to remove acoustic segments in audio recordings that carry little information for acoustic scene classification (ASC). Our approach is built upon a universal set of acoustic segment units covering the overall acoustic scene space. First, those units are modeled with acoustic segment models (ASMs) used to tokenize acoustic scene utterances into sequences of acoustic segment units. Next, paralleling the idea of stop words in information retrieval, stop ASMs are automatically detected. Finally, acoustic segments associated with the stop ASMs are blocked, because of their low indexing power in retrieval of most acoustic scenes. In contrast to building scene models with whole utterances, the ASM-removed sub-utterances, i.e., acoustic utterances without stop acoustic segments, are then used as inputs to the AlexNet-L back-end for final classification. On the DCASE 2018 dataset, scene classification accuracy increases from 68%, with whole utterances, to 72.1%, with segment selection. This represents a competitive accuracy without any data augmentation, and/or ensemble strategy. Moreover, our approach compares favourably to AlexNet-L with attention.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dhanunjaya Varma Devalraju|AUTHOR Dhanunjaya Varma Devalraju]], [[Muralikrishna H.|AUTHOR Muralikrishna H.]], [[Padmanabhan Rajan|AUTHOR Padmanabhan Rajan]], [[Dileep Aroor Dinesh|AUTHOR Dileep Aroor Dinesh]]
</p><p class="cpabstractcardaffiliationlist">IIT Mandi, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1206–1210&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic soundscapes can be made up of background sound events and foreground sound events. Many times, either the background (or the foreground) may provide useful cues in discriminating one soundscape from another. A part of the background or a part of the foreground can be suppressed by using subspace projections. These projections can be learnt by utilising the framework of robust principal component analysis. In this work, audio signals are represented as embeddings from a convolutional neural network, and meta-embeddings are derived using an attention mechanism. This representation enables the use of class-specific projections for effective suppression, leading to good discrimination. Our experimental evaluation demonstrates the effectiveness of the method on standard datasets for acoustic scene classification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Panagiotis Tzirakis|AUTHOR Panagiotis Tzirakis]], [[Alexander Shiarella|AUTHOR Alexander Shiarella]], [[Robert Ewers|AUTHOR Robert Ewers]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1211–1215&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Auditory data is used by ecologists for a variety of purposes, including identifying species ranges, estimating population sizes, and studying behaviour. Autonomous recording units (ARUs) enable auditory data collection over a wider area, and can provide improved consistency over traditional sampling methods. The result is an abundance of audio data — much more than can be analysed by scientists with the appropriate taxonomic skills. In this paper, we address the divide between academic machine learning research on animal vocalisation classifiers, and their application to conservation efforts. As a unique case study, we build a Bornean gibbon call detection system by first manually annotating existing data, and then comparing audio analysis tool kits including end-to-end and bag-of-audio-word modelling. Finally, we propose a deep architecture that outperforms the other approaches with respect to unweighted average recall. The code is available at: https://github.com/glam-imperial/Bornean-Gibbons-Call-Detection</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Orazio Angelini|AUTHOR Orazio Angelini]], [[Alexis Moinet|AUTHOR Alexis Moinet]], [[Kayoko Yanagisawa|AUTHOR Kayoko Yanagisawa]], [[Thomas Drugman|AUTHOR Thomas Drugman]]
</p><p class="cpabstractcardaffiliationlist">Amazon, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1221–1225&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present UTACO, a singing synthesis model based on an attention-based sequence-to-sequence mechanism and a vocoder based on dilated causal convolutions. These two classes of models have significantly affected the field of text-to-speech, but have never been thoroughly applied to the task of singing synthesis. UTACO demonstrates that attention can be successfully applied to the singing synthesis field and improves naturalness over the state of the art. The system requires considerably less explicit modelling of voice features such as F0 patterns, vibratos, and note and phoneme durations, than previous models in the literature. Despite this, it shows a strong improvement in naturalness with respect to previous neural singing synthesis models. The model does not require any durations or pitch patterns as inputs, and learns to insert vibrato autonomously according to the musical context. However, we observe that, by completely dispensing with any explicit duration modelling it becomes harder to obtain the fine control of timing needed to exactly match the tempo of a song.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yusong Wu|AUTHOR Yusong Wu]]^^1^^, [[Shengchen Li|AUTHOR Shengchen Li]]^^2^^, [[Chengzhu Yu|AUTHOR Chengzhu Yu]]^^3^^, [[Heng Lu|AUTHOR Heng Lu]]^^4^^, [[Chao Weng|AUTHOR Chao Weng]]^^3^^, [[Liqiang Zhang|AUTHOR Liqiang Zhang]]^^5^^, [[Dong Yu|AUTHOR Dong Yu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BUPT, China; ^^2^^BUPT, China; ^^3^^Tencent, USA; ^^4^^Tencent, USA; ^^5^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1226–1230&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Peking Opera has been the most dominant form of Chinese performing art since around 200 years ago. A Peking Opera singer usually exhibits a very strong personal style via introducing improvisation and expressiveness on stage which leads the actual rhythm and pitch contour to deviate significantly from the original music score. This inconsistency poses a great challenge in Peking Opera singing voice synthesis from a music score. In this work, we propose to deal with this issue and synthesize expressive Peking Opera singing from the music score based on the Duration Informed Attention Network (DurIAN) framework. To tackle the rhythm mismatch, Lagrange multiplier is used to find the optimal output phoneme duration sequence with the constraint of the given note duration from music score. As for the pitch contour mismatch, instead of directly inferring from music score, we adopt a pseudo music score generated from the real singing and feed it as input during training. The experiments demonstrate that with the proposed system we can synthesize Peking Opera singing voice with high-quality timbre, pitch and expressiveness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liqiang Zhang|AUTHOR Liqiang Zhang]]^^1^^, [[Chengzhu Yu|AUTHOR Chengzhu Yu]]^^2^^, [[Heng Lu|AUTHOR Heng Lu]]^^2^^, [[Chao Weng|AUTHOR Chao Weng]]^^2^^, [[Chunlei Zhang|AUTHOR Chunlei Zhang]]^^2^^, [[Yusong Wu|AUTHOR Yusong Wu]]^^3^^, [[Xiang Xie|AUTHOR Xiang Xie]]^^4^^, [[Zijin Li|AUTHOR Zijin Li]]^^5^^, [[Dong Yu|AUTHOR Dong Yu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BIT, China; ^^2^^Tencent, USA; ^^3^^Tencent, China; ^^4^^BIT, China; ^^5^^China Conservatory of Music, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1231–1235&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Singing voice conversion is converting the timbre in the source singing to the target speaker’s voice while keeping singing content the same. However, singing data for target speaker is much more difficult to collect compared with normal speech data. In this paper, we introduce a singing voice conversion algorithm that is capable of generating high quality target speaker’s singing using only his/her normal speech data. First, we manage to integrate the training and conversion process of speech and singing into one framework by unifying the features used in standard speech synthesis system and singing synthesis system. In this way, normal speech data can also contribute to singing voice conversion training, making the singing voice conversion system more robust especially when the singing database is small. Moreover, in order to achieve one-shot singing voice conversion, a speaker embedding module is developed using both speech and singing data, which provides target speaker identify information during conversion. Experiments indicate proposed sing conversion system can convert source singing to target speaker’s high-quality singing with only 20 seconds of target speaker’s enrollment speech data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuanbo Hou|AUTHOR Yuanbo Hou]]^^1^^, [[Frank K. Soong|AUTHOR Frank K. Soong]]^^2^^, [[Jian Luan|AUTHOR Jian Luan]]^^2^^, [[Shengchen Li|AUTHOR Shengchen Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BUPT, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1236–1240&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Detecting singing-voice in polyphonic instrumental music is critical to music information retrieval. To train a robust vocal detector, a large dataset marked with //vocal// or //non-vocal// label at frame-level is essential. However, frame-level labeling is time-consuming and labor expensive, resulting there is little well-labeled dataset available for singing-voice detection (S-VD). Hence, we propose a data augmentation method for S-VD by transfer learning. In this study, clean speech clips with voice activity endpoints and separate instrumental music clips are artificially added together to simulate polyphonic vocals to train a //vocal /non-vocal// detector. Due to the different articulation and phonation between speaking and singing, the vocal detector trained with the artificial dataset does not match well with the polyphonic music which is singing vocals together with the instrumental accompaniments. To reduce this mismatch, transfer learning is used to transfer the knowledge learned from the artificial speech-plus-music training set to a small but matched polyphonic dataset, i.e., singing vocals with accompaniments. By transferring the related knowledge to make up for the lack of well-labeled training data in S-VD, the proposed data augmentation method by transfer learning can improve S-VD performance with an //F-score// improvement from 89.5% to 93.2%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haohe Liu|AUTHOR Haohe Liu]], [[Lei Xie|AUTHOR Lei Xie]], [[Jian Wu|AUTHOR Jian Wu]], [[Geng Yang|AUTHOR Geng Yang]]
</p><p class="cpabstractcardaffiliationlist">Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1241–1245&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a new input format, channel-wise subband input (CWS), for convolutional neural networks (CNN) based music source separation (MSS) models in the frequency domain. We aim to address the major issues in CNN-based high-resolution MSS model: high computational cost and weight sharing between distinctly different bands. Specifically, in this paper, we decompose the input mixture spectra into several bands and concatenate them channel-wise as the model input. The proposed approach enables effective weight sharing in each subband and introduces more flexibility between channels. For comparison purposes, we perform voice and accompaniment separation (VAS) on models with different scales, architectures, and CWS settings. Experiments show that the CWS input is beneficial in many aspects. We evaluate our method on //musdb18hq// test set, focusing on SDR, SIR and SAR metrics. Among all our experiments, CWS enables models to obtain 6.9% performance gain on the average metrics. With even a smaller number of parameters, less training data, and shorter training time, ourMDenseNet with 8-bands CWS input still surpasses the original MMDenseNet with a large margin. Moreover, CWS also reduces computational cost and training time to a large extent.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1246–1250&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We emulate continual learning observed in real life, where new training data, which represent new application domain, are used for gradual improvement of an Automatic Speech Recognizer (ASR) trained on old domains. The data on which the original classifier was trained is no longer required and we observe no loss of performance on the original domain. Further, on previously unseen domain, our technique appears to yield slight advantage over offline multi-condition training. The proposed learning technique is consistent with our previously studied //ad hoc// stream attention based multi-stream ASR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryu Takeda|AUTHOR Ryu Takeda]], [[Kazunori Komatani|AUTHOR Kazunori Komatani]]
</p><p class="cpabstractcardaffiliationlist">Osaka University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1291–1295&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a new frame-wise //online// unsupervised adaptation method for an acoustic model based on a deep neural network (DNN). This is in contrast to many existing methods that assume //offline and supervised// processing. We use a likelihood cost function conditioned by past observations, which mathematically integrate the unsupervised adaptation and decoding process for automatic speech recognition (ASR). The issue is that the parameter update of the DNN should be less affected by outliers (model mismatch) and ASR recognition errors. Inspired by the robust adaptive filter techniques, we propose 1) parameter update control to remove the influence of the outliers and 2) regularization using L2-norm of DNN’s posterior probabilities of specific phonemes that are prone to recognition errors. Experiments showed that the phoneme recognition accuracies were improved by a maximum of 6.3 points, with an average error reduction rate of 10%, for various speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Genshun Wan|AUTHOR Genshun Wan]]^^1^^, [[Jia Pan|AUTHOR Jia Pan]]^^1^^, [[Qingran Wang|AUTHOR Qingran Wang]]^^2^^, [[Jianqing Gao|AUTHOR Jianqing Gao]]^^2^^, [[Zhongfu Ye|AUTHOR Zhongfu Ye]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^iFLYTEK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1251–1255&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In our previous work, we introduced a speaker adaptive training method based on frame-level attention mechanism for speech recognition, which has been proved an effective way to do speaker adaptive training. In this paper, we present an improved method by introducing the attention-over-attention mechanism. This attention module is used to further measure the contribution of each frame to the speaker embeddings in an utterance, and then generate an utterance-level speaker embedding to perform speaker adaptive training. Compared with the frame-level ones, the generated utterance-level speaker embeddings are more representative and stable. Experiments on both the Switchboard and AISHELL-2 tasks show that our method can achieve a relative word error rate reduction of approximately 8.0% compared with the speaker independent model, and over 6.0% compared with the traditional utterance-level d-vector-based speaker adaptive training method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yan Huang|AUTHOR Yan Huang]]^^1^^, [[Jinyu Li|AUTHOR Jinyu Li]]^^1^^, [[Lei He|AUTHOR Lei He]]^^2^^, [[Wenning Wei|AUTHOR Wenning Wei]]^^2^^, [[William Gale|AUTHOR William Gale]]^^1^^, [[Yifan Gong|AUTHOR Yifan Gong]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Microsoft, USA; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1256–1260&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Rapid unsupervised speaker adaptation in an E2E system posits us new challenges due to its end-to-end unified structure in addition to its intrinsic difficulty of data sparsity and imperfect label [1]. Previously we proposed utilizing the content relevant personalized speech synthesis for rapid speaker adaptation and achieved significant performance breakthrough in a hybrid system [2]. In this paper, we answer the following two questions: First, how to effectively perform rapid speaker adaptation in an RNN-T. Second, whether our previously proposed approach is still beneficial for the RNN-T and what are the modification and distinct observations. We apply the proposed methodology to a speaker adaptation task in a state-of-art presentation transcription RNN-T system. In the 1 min setup, it yields 11.58% or 7.95% relative word error rate (WER) reduction for the sup/unsup adaptation, comparing to the negligible gain when adapting with 1 min source speech. In the 10 min setup, it yields 15.71% or 8.00% relative WER reduction, doubling the gain of the source speech adaptation. We further apply various data filtering techniques and significantly bridge the gap between sup/unsup adaptation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]]^^1^^, [[Chongjia Ni|AUTHOR Chongjia Ni]]^^2^^, [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]]^^2^^, [[Shafiq Joty|AUTHOR Shafiq Joty]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Bin Ma|AUTHOR Bin Ma]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1261–1265&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end models have been introduced into automatic speech recognition (ASR) successfully and achieved superior performance compared with conventional hybrid systems, especially with the newly proposed transformer model. However, speaker mismatch between training and test data remains a problem, and speaker adaptation for transformer model can be further improved. In this paper, we propose to conduct speaker aware training for ASR in transformer model. Specifically, we propose to embed speaker knowledge through a persistent memory model into speech transformer encoder at utterance level. The speaker information is represented by a number of static speaker i-vectors, which is concatenated to speech utterance at each encoder self-attention layer. Persistent memory is thus formed by carrying speaker information through the depth of encoder. The speaker knowledge is captured from self-attention between speech and persistent memory vector in encoder. Experiment results on LibriSpeech, Switchboard and AISHELL-1 ASR task show that our proposed model brings relative 4.7%–12.5% word error rate (WER) reductions, and achieves superior results compared with other models with the same objective. Furthermore, our model brings relative 2.1%–8.3% WER reductions compared with the first persistent memory model used in ASR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fenglin Ding|AUTHOR Fenglin Ding]], [[Wu Guo|AUTHOR Wu Guo]], [[Bin Gu|AUTHOR Bin Gu]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1266–1270&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a new speaker normalization technique for acoustic model adaptation in connectionist temporal classification (CTC)-based automatic speech recognition. In the proposed method, for the inputs of a hidden layer, the mean and variance of each activation are first estimated at the speaker level. Then, we normalize each speaker representation independently by making them follow a standard normal distribution. Furthermore, we propose using an auxiliary network to dynamically generate the scaling and shifting parameters of speaker normalization, and an attention mechanism is introduced to improve performance. The experiments are conducted on the public Chinese dataset AISHELL-1. Our proposed methods present high effectiveness in adapting the CTC model, achieving up to 17.5% character error rate improvement over the speaker-independent (SI) model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Akhil Mathur|AUTHOR Akhil Mathur]]^^1^^, [[Nadia Berthouze|AUTHOR Nadia Berthouze]]^^1^^, [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College London, UK; ^^2^^University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1271–1275&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised domain adaptation using adversarial learning has shown promise in adapting speech models from a labeled source domain to an unlabeled target domain. However, prior works make a strong assumption that the label spaces of source and target domains are identical, which can be easily violated in real-world conditions. We present AMLS, an end-to-end architecture that performs //Adaptation under Mismatched Label Spaces// using two weighting schemes to separate shared and private classes in each domain. An evaluation on three speech adaptation tasks, namely gender, microphone, and emotion adaptation, shows that AMLS provides significant accuracy gains over baselines used in speech and vision adaptation tasks. Our contribution paves the way for applying UDA to speech models in unconstrained settings with no assumptions on the source and target label spaces.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Genta Indra Winata|AUTHOR Genta Indra Winata]], [[Samuel Cahyawijaya|AUTHOR Samuel Cahyawijaya]], [[Zihan Liu|AUTHOR Zihan Liu]], [[Zhaojiang Lin|AUTHOR Zhaojiang Lin]], [[Andrea Madotto|AUTHOR Andrea Madotto]], [[Peng Xu|AUTHOR Peng Xu]], [[Pascale Fung|AUTHOR Pascale Fung]]
</p><p class="cpabstractcardaffiliationlist">HKUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1276–1280&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Local dialects influence people to pronounce words of the same language differently from each other. The great variability and complex characteristics of accents create a major challenge for training a robust and accent-agnostic automatic speech recognition (ASR) system. In this paper, we introduce a cross-accented English speech recognition task as a benchmark for measuring the ability of the model to adapt to unseen accents using the existing CommonVoice corpus. We also propose an accent-agnostic approach that extends the model-agnostic meta-learning (MAML) algorithm for fast adaptation to unseen accents. Our approach significantly outperforms joint training in both zero-shot, few-shot, and all-shot in the mixed-region and cross-region settings in terms of word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kartik Khandelwal|AUTHOR Kartik Khandelwal]], [[Preethi Jyothi|AUTHOR Preethi Jyothi]], [[Abhijeet Awasthi|AUTHOR Abhijeet Awasthi]], [[Sunita Sarawagi|AUTHOR Sunita Sarawagi]]
</p><p class="cpabstractcardaffiliationlist">IIT Bombay, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1281–1285&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce the problem of adapting a black-box, cloud-based ASR system to speech from a target accent. While leading online ASR services obtain impressive performance on mainstream accents, they perform poorly on sub-populations — we observed that the word error rate (WER) achieved by Google’s ASR API on Indian accents is almost twice the WER on US accents. Existing adaptation methods either require access to model parameters or overlay an error correcting module on output transcripts. We highlight the need for correlating outputs with the original speech to fix accent errors. Accordingly, we propose a novel coupling of an open-source accent-tuned local model with the black-box service where the output from the service guides frame-level inference in the local model. Our fine-grained merging algorithm is better at fixing accent errors than existing word-level combination strategies. Experiments on Indian and Australian accents with three leading ASR models as service, show that we achieve upto 28% relative reduction in WER over both the local and service models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[M.A. Tuğtekin Turan|AUTHOR M.A. Tuğtekin Turan]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Denis Jouvet|AUTHOR Denis Jouvet]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1286–1290&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current automatic speech recognition (ASR) systems trained on native speech often perform poorly when applied to non-native or accented speech. In this work, we propose to compute x-vector-like accent embeddings and use them as auxiliary inputs to an acoustic model trained on native data only in order to improve the recognition of multi-accent data comprising native, non-native, and accented speech. In addition, we leverage untranscribed accented training data by means of semi-supervised learning. Our experiments show that acoustic models trained with the proposed accent embeddings outperform those trained with conventional i-vector or x-vector speaker embeddings, and achieve a 15% relative word error rate (WER) reduction on non-native and accented speech w.r.t. acoustic models trained with regular spectral features only. Semi-supervised training using just 1 hour of untranscribed speech per accent yields an additional 15% relative WER reduction w.r.t. models trained on native data only.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jie Wu|AUTHOR Jie Wu]], [[Jian Luan|AUTHOR Jian Luan]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1296–1300&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a high quality singing synthesizer that is able to model a voice with limited available recordings. Based on the sequence-to-sequence singing model, we design a multi-singer framework to leverage all the existing singing data of different singers. To attenuate the issue of musical score unbalance among singers, we incorporate an adversarial task of singer classification to make encoder output less singer dependent. Furthermore, we apply multiple random window discriminators (MRWDs) on the generated acoustic features to make the network be a GAN. Both objective and subjective evaluations indicate that the proposed synthesizer can generate higher quality singing voice than baseline (4.12 vs 3.53 in MOS). Especially, the articulation of high-pitched vowels is significantly enhanced.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[JinHong Lu|AUTHOR JinHong Lu]], [[Hiroshi Shimodaira|AUTHOR Hiroshi Shimodaira]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1301–1305&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates the direct use of speech waveforms to predict head motion for speech-driven head-motion synthesis, whereas the use of spectral features such as MFCC as basic input features together with additional features such as energy and F0 is common in the literature. We show that, rather than combining different features that originate from waveforms, it is more effective to use waveforms directly predicting corresponding head motion. The challenge with the waveform-based approach is that waveforms contain a large amount of information irrelevant to predict head motion, which hinders the training of neural networks. To overcome the problem, we propose a canonical-correlation-constrained autoencoder (CCCAE), where hidden layers are trained to not only minimise the error but also maximise the canonical correlation with head motion. Compared with an MFCC-based system, the proposed system shows comparable performance in objective evaluation, and better performance in subject evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peiling Lu|AUTHOR Peiling Lu]], [[Jie Wu|AUTHOR Jie Wu]], [[Jian Luan|AUTHOR Jian Luan]], [[Xu Tan|AUTHOR Xu Tan]], [[Li Zhou|AUTHOR Li Zhou]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1306–1310&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents XiaoiceSing, a high-quality singing voice synthesis system which employs an integrated network for spectrum, F0 and duration modeling. We follow the main architecture of FastSpeech while proposing some singing-specific design: 1) Besides phoneme ID and position encoding, features from musical score (e.g. note pitch and length) are also added. 2) To attenuate off-key issues, we add a residual connection in F0 prediction. 3) In addition to the duration loss of each phoneme, the duration of all the phonemes in a musical note is accumulated to calculate the syllable duration loss for rhythm enhancement. Experiment results show that XiaoiceSing outperforms the baseline system of convolutional neural networks by 1.44 MOS on sound quality, 1.18 on pronunciation accuracy and 1.38 on naturalness respectively. In two A/B tests, the proposed F0 and duration modeling methods achieve 97.3% and 84.3% preference rate over baseline respectively, which demonstrates the overwhelming advantages of XiaoiceSing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravindra Yadav|AUTHOR Ravindra Yadav]]^^1^^, [[Ashish Sardana|AUTHOR Ashish Sardana]]^^2^^, [[Vinay P. Namboodiri|AUTHOR Vinay P. Namboodiri]]^^1^^, [[Rajesh M. Hegde|AUTHOR Rajesh M. Hegde]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIT Kanpur, India; ^^2^^NVIDIA, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1311–1315&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ability to envisage the visual of a talking face based just on hearing a voice is a unique human capability. There have been a number of works that have solved for this ability recently. We differ from these approaches by enabling a variety of talking face generations based on single audio input. Indeed, just having the ability to generate a single talking face would make a system almost robotic in nature. In contrast, our unsupervised stochastic audio-to-video generation model allows for diverse generations from a single audio input. Particularly, we present an unsupervised stochastic audio-to-video generation model that can capture multiple modes of the video distribution. We ensure that all the diverse generations are plausible. We do so through a principled multi-modal variational autoencoder framework. We demonstrate its efficacy on the challenging LRWand GRID datasets and demonstrate performance better than the baseline, while having the ability to generate multiple diverse lip synchronized videos.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Da-Yi Wu|AUTHOR Da-Yi Wu]]^^1^^, [[Yi-Hsuan Yang|AUTHOR Yi-Hsuan Yang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University; ^^2^^Academia Sinica</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1316–1320&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the use of generative adversarial network (GAN)-based models for converting a speech signal into a singing one, without reference to the phoneme sequence underlying the speech. This is achieved by viewing speech-to-singing conversion as a style transfer problem. Specifically, given a speech input, and the F0 contour of the target singing output, the proposed model generates the spectrogram of a singing signal with a progressive-growing encoder/decoder architecture. Moreover, the model uses a boundary equilibrium GAN loss term such that it can learn from both paired and unpaired data. The spectrogram is finally converted into wave with a separate GAN-based vocoder. Our quantitative and qualitative analysis show that the proposed model generates singing voices with much higher naturalness than an existing non adversarially-trained baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shunsuke Goto|AUTHOR Shunsuke Goto]]^^1^^, [[Kotaro Onishi|AUTHOR Kotaro Onishi]]^^1^^, [[Yuki Saito|AUTHOR Yuki Saito]]^^2^^, [[Kentaro Tachibana|AUTHOR Kentaro Tachibana]]^^1^^, [[Koichiro Mori|AUTHOR Koichiro Mori]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^DeNA, Japan; ^^2^^University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1321–1325&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We are quite able to imagine voice characteristics of a speaker from his/her appearance, especially a face. In this paper, we propose Face2Speech, which generates speech with its characteristics predicted from a face image. This framework consists of three separately trained modules: a speech encoder, a multi-speaker text-to-speech (TTS), and a face encoder. The speech encoder outputs an embedding vector which is distinguishable from other speakers. The multi-speaker TTS synthesizes speech by using the embedding vector, and then the face encoder outputs the embedding vector of a speaker from the speaker’s face image. Experimental results of matching and naturalness tests demonstrate that synthetic speech generated with the face-derived embedding vector is comparable to one with the speech-derived embedding vector.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wentao Wang|AUTHOR Wentao Wang]]^^1^^, [[Yan Wang|AUTHOR Yan Wang]]^^1^^, [[Jianqing Sun|AUTHOR Jianqing Sun]]^^2^^, [[Qingsong Liu|AUTHOR Qingsong Liu]]^^2^^, [[Jiaen Liang|AUTHOR Jiaen Liang]]^^2^^, [[Teng Li|AUTHOR Teng Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Anhui University, China; ^^2^^Unisound, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1326–1330&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous talking head generation methods mostly focus on frontal face synthesis while neglecting natural person head motion. In this paper, a generative adversarial network (GAN) based method is proposed to generate talking head video with not only high quality facial appearance, accurate lip movement, but also natural head motion. To this aim, the facial landmarks are detected and used to represent lip motion and head pose, and the conversions from speech to these middle level representations are learned separately through Convolutional Neural Networks (CNN) with wingloss. The Gated Recurrent Unit (GRU) is adopted to regularize the sequential transition. The representations for different factors of talking head are jointly feeded to a Generative Adversarial Network (GAN) based model with an attentional mechanism to synthesize the talking video. Extensive experiments on the benchmark dataset as well as our own collected dataset validate that the propose method can yield talking videos with natural head motions, and the performance is superior to state-of-the-art talking face generation methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marc René Schädler|AUTHOR Marc René Schädler]]
</p><p class="cpabstractcardaffiliationlist">Carl von Ossietzky Universität Oldenburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1331–1335&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This contributions describes the “IISPA” submission to the Hurricane Challenge 2.0. The challenge organizers called for submissions of speech signals processed with the aim to improve their intelligibility in adverse listening conditions. They evaluated the submissions with matrix sentence tests in an international listening experiment. An intelligibility-improving signal processing approach (IISPA) inspired from research on speech perception of listeners with impaired hearing was designed. Its parameters were optimized with an objective intelligibility model, the simulation framework for auditory discrimination experiments (FADE). In FADE, a re-purposed automatic speech recognition (ASR) system is employed as a models for human speech recognition performance. The model predicted an improvement in speech recognition threshold (SRT) of approximately 5.0 dB due to the optimized IISPA. The processed speech signals were evaluated in the Hurricane Challenge 2.0. The measured improvements were language-dependent: up to 4.8 dB for the Spanish test, up to 3.8 dB for the German test, and up to 2.1 dB for the English test. The results show on the one hand the potential of using an ASR-based speech recognition model to optimize an intelligibility-improving signal processing scheme, and on the other hand the need for thorough listening experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haoyu Li|AUTHOR Haoyu Li]]^^1^^, [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]]^^2^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NII, Japan; ^^2^^Academia Sinica</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1336–1340&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The intelligibility of natural speech is seriously degraded when exposed to adverse noisy environments. In this work, we propose a deep learning-based speech modification method to compensate for the intelligibility loss, with the constraint that the root mean square (RMS) level and duration of the speech signal are maintained before and after modifications. Specifically, we utilize an iMetricGAN approach to optimize the speech intelligibility metrics with generative adversarial networks (GANs). Experimental results show that the proposed iMetricGAN outperforms conventional state-of-the-art algorithms in terms of objective measures, i.e., speech intelligibility in bits (SIIB) and extended short-time objective intelligibility (ESTOI), under a Cafeteria noise condition. In addition, formal listening tests reveal significant intelligibility gains when both noise and reverberation exist.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Rennies|AUTHOR Jan Rennies]]^^1^^, [[Henning Schepker|AUTHOR Henning Schepker]]^^2^^, [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]]^^3^^, [[Martin Cooke|AUTHOR Martin Cooke]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fraunhofer IDMT, Germany; ^^2^^Carl von Ossietzky Universität Oldenburg, Germany; ^^3^^University of Edinburgh, UK; ^^4^^Ikerbasque, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1341–1345&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Understanding speech played back in noisy and reverberant conditions remains a challenging task. This paper describes the Hurricane Challenge 2.0, the second large-scale evaluation of algorithms aiming to solve the near-end listening enhancement problem. The challenge consisted of modifying German, English, and Spanish speech, which was then evaluated by a total of 187 listeners at three sites. Nine algorithms participated in the challenge. Results indicate a large variability in performance between the algorithms, and that some entries achieved large speech intelligibility benefits. The largest observed benefits corresponded to intensity changes of about 7 dB, which exceeded the results obtained in the previous challenge despite more complex listening conditions. A priori information about the acoustic conditions did not provide a general advantage.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Olympia Simantiraki|AUTHOR Olympia Simantiraki]]^^1^^, [[Martin Cooke|AUTHOR Martin Cooke]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad del País Vasco, Spain; ^^2^^Ikerbasque, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1346–1350&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Fast speech may reduce intelligibility, but there is little agreement as to whether listeners benefit from slower speech in noisy conditions. The current study explored the relationship between speech rate and masker properties using a listening preference technique in which participants were able to control speech rate in real time. Spanish listeners adjusted speech rate while listening to word sequences in quiet, in stationary noise at signal-to-noise ratios of 0, +6 and +12 dB, and in modulated noise for 5 envelope modulation rates. Following selection of a preferred rate, participants went on to identify words presented at that rate. Listeners favoured faster speech in quiet, chose increasingly slower rates in increasing levels of stationary noise, and showed a preference for speech rates that led to a contrast with masker envelope modulation rates. Participants showed distinct preferences even when intelligibility was near ceiling levels. These outcomes suggest that individuals attempt to compensate for the decrement in cognitive resources availability in more adverse conditions by reducing speech rate and are able to exploit differences in modulation properties of the target speech and masker. The listening preference approach provides insights into factors such as listening effort that are not measured in intelligibility-based metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felicitas Bederna|AUTHOR Felicitas Bederna]]^^1^^, [[Henning Schepker|AUTHOR Henning Schepker]]^^2^^, [[Christian Rollwage|AUTHOR Christian Rollwage]]^^1^^, [[Simon Doclo|AUTHOR Simon Doclo]]^^1^^, [[Arne Pusch|AUTHOR Arne Pusch]]^^1^^, [[Jörg Bitzer|AUTHOR Jörg Bitzer]]^^1^^, [[Jan Rennies|AUTHOR Jan Rennies]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fraunhofer IDMT, Germany; ^^2^^Carl von Ossietzky Universität Oldenburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1351–1355&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Near-end listening enhancement (NELE) algorithms aim to pre-process speech prior to playback via loudspeakers so as to maintain high speech intelligibility even when listening conditions are not optimal, e.g., due to noise or reverberation. Often NELE algorithms are designed for scenarios considering either only the detrimental effect of noise or only reverberation, but not both disturbances. In many typical applications scenarios, however, both factors are present. In this paper, we evaluate a new combination of a noise-dependent and a reverberation-dependent algorithm implemented in a common framework. Specifically, we use instrumental measures as well as subjective ratings of listening effort for acoustic scenarios with different reverberation times and realistic signal-to-noise ratios. The results show that the noise-dependent algorithm also performs well in reverberation, and that the combination of both algorithms can yield slightly better performance than the individual algorithms alone. This benefit appears to depend strongly on the specific acoustic condition, indicating that further work is required to optimize the adaptive algorithm behavior.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carol Chermaz|AUTHOR Carol Chermaz]], [[Simon King|AUTHOR Simon King]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1356–1360&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present the beta version of ASE (the Automatic Sound Engineer), a NELE (Near End Listening Enhancement) algorithm based on audio engineering knowledge. Generations of sound engineers have improved the intelligibility of speech against competing sounds and reverberation, while maintaining high sound quality and artistic integrity (e.g., audio track mixing in music and movies). We try to grasp the essential aspects of this expert knowledge and apply it to the more mundane context of speech playback in realistic noise. The algorithm described here was entered into the Hurricane Challenge 2.0, an evaluation of NELE algorithms. Results from those listening tests across three languages show the potential of our approach, which achieved improvements of over 7 dB EIC (Equivalent Intensity Change), corresponding to an absolute increase of 58% WAR (Word Accuracy Rate).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]]^^1^^, [[Muhammed P.V. Shifas|AUTHOR Muhammed P.V. Shifas]]^^1^^, [[Yannis Pantazis|AUTHOR Yannis Pantazis]]^^2^^, [[Yannis Stylianou|AUTHOR Yannis Stylianou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Crete, Greece; ^^2^^FORTH, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1361–1365&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The increased adoption of digital assistants makes text-to-speech (TTS) synthesis systems an indispensable feature of modern mobile devices. It is hence desirable to build a system capable of generating highly intelligible speech in the presence of noise. Past studies have investigated style conversion in TTS synthesis, yet degraded synthesized quality often leads to worse intelligibility. To overcome such limitations, we proposed a novel transfer learning approach using Tacotron and WaveRNN based TTS synthesis. The proposed speech system exploits two modification strategies: (a) Lombard speaking style data and (b) Spectral Shaping and Dynamic Range Compression (SSDRC) which has been shown to provide high intelligibility gains by redistributing the signal energy on the time-frequency domain. We refer to this extension as Lombard-SSDRC TTS system. Intelligibility enhancement as quantified by the Intelligibility in Bits (SIIB^^Gauss^^) measure shows that the proposed Lombard-SSDRC TTS system shows significant relative improvement between 110% and 130% in speech-shaped noise (SSN), and 47% to 140% in competing-speaker noise (CSN) against the state-of-the-art TTS approach. Additional subjective evaluation shows that Lombard-SSDRC TTS successfully increases the speech intelligibility with relative improvement of 455% for SSN and 104% for CSN in median keyword correction rate compared to the baseline TTS method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Roberto Gretter|AUTHOR Roberto Gretter]]^^1^^, [[Marco Matassoni|AUTHOR Marco Matassoni]]^^1^^, [[Daniele Falavigna|AUTHOR Daniele Falavigna]]^^1^^, [[Keelan Evanini|AUTHOR Keelan Evanini]]^^2^^, [[Chee Wee Leong|AUTHOR Chee Wee Leong]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FBK, Italy; ^^2^^Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 245–249&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an overview of the ASR challenge for non-native children’s speech organized for a special session at Interspeech 2020. The data for the challenge was obtained in the context of a spoken language proficiency assessment administered at Italian schools for students between the ages of 9 and 16 who were studying English and German as a foreign language. The corpus distributed for the challenge was a subset of the English recordings. Participating teams competed either in a closed track, in which they could use only the training data released by the organizers of the challenge, or in an open track, in which they were allowed to use additional training data. The closed track received 9 entries and the open track received 7 entries, with the best scoring systems achieving substantial improvements over a state-of-the-art baseline system. This paper describes the corpus of non-native children’s speech that was used for the challenge, analyzes the results, and discusses some points that should be considered for subsequent challenges in this domain in the future.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tien-Hong Lo|AUTHOR Tien-Hong Lo]], [[Fu-An Chao|AUTHOR Fu-An Chao]], [[Shi-Yan Weng|AUTHOR Shi-Yan Weng]], [[Berlin Chen|AUTHOR Berlin Chen]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan Normal University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 250–254&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the NTNU ASR system participating in the Interspeech 2020 Non-Native Children’s Speech ASR Challenge supported by the SIG-CHILD group of ISCA. This ASR shared task is made much more challenging due to the coexisting diversity of non-native and children speaking characteristics. In the setting of closed-track evaluation, all participants were restricted to develop their systems merely based on the speech and text corpora provided by the organizer. To work around this under-resourced issue, we built our ASR system on top of CNN-TDNNF-based acoustic models, meanwhile harnessing the synergistic power of various data augmentation strategies, including both utterance- and word-level speed perturbation and spectrogram augmentation, alongside a simple yet effective data-cleansing approach. All variants of our ASR system employed an RNN-based language model to rescore the first-pass recognition hypotheses, which was trained solely on the text dataset released by the organizer. Our system with the best configuration came out in second place, resulting in a word error rate (WER) of 17.59%, while those of the top-performing, second runner-up and official baseline systems are 15.67%, 18.71%, 35.09%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kate M. Knill|AUTHOR Kate M. Knill]], [[Linlin Wang|AUTHOR Linlin Wang]], [[Yu Wang|AUTHOR Yu Wang]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 255–259&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic spoken language assessment (SLA) is a challenging problem due to the large variations in learner speech combined with limited resources. These issues are even more problematic when considering children learning a language, with higher levels of acoustic and lexical variability, and of code-switching compared to adult data. This paper describes the ALTA system for the INTERSPEECH 2020 Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech. The data for this task consists of examination recordings of Italian school children aged 9–16, ranging in ability from minimal, to basic, to limited but effective command of spoken English. A variety of systems were developed using the limited training data available, 49 hours. State-of-the-art acoustic models and language models were evaluated, including a diversity of lexical representations, handling code-switching and learner pronunciation errors, and grade specific models. The best single system achieved a word error rate (WER) of 16.9% on the evaluation data. By combining multiple diverse systems, including both grade independent and grade specific models, the error rate was reduced to 15.7%. This combined system was the best performing submission for both the closed and open tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hemant Kathania|AUTHOR Hemant Kathania]], [[Mittul Singh|AUTHOR Mittul Singh]], [[Tamás Grósz|AUTHOR Tamás Grósz]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 260–264&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes AaltoASR’s speech recognition system for the INTERSPEECH 2020 shared task on Automatic Speech Recognition (ASR) for non-native children’s speech. The task is to recognize non-native speech from children of various age groups given a limited amount of speech. Moreover, the speech being spontaneous has false starts transcribed as partial words, which in the test transcriptions leads to unseen partial words. To cope with these two challenges, we investigate a data augmentation-based approach. Firstly, we apply the prosody-based data augmentation to supplement the audio data. Secondly, we simulate false starts by introducing partial-word noise in the language modeling corpora creating new words. Acoustic models trained on prosody-based augmented data outperform the models using the baseline recipe or the SpecAugment-based augmentation. The partial-word noise also helps to improve the baseline language model. Our ASR system, a combination of these schemes, is placed third in the evaluation period and achieves the word error rate of 18.71%. Post-evaluation period, we observe that increasing the amounts of prosody-based augmented data leads to better performance. Furthermore, removing low-confidence-score words from hypotheses can lead to further gains. These two improvements lower the ASR error rate to 17.99%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mostafa Shahin|AUTHOR Mostafa Shahin]], [[Renée Lu|AUTHOR Renée Lu]], [[Julien Epps|AUTHOR Julien Epps]], [[Beena Ahmed|AUTHOR Beena Ahmed]]
</p><p class="cpabstractcardaffiliationlist">UNSW Sydney, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 265–268&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we describe our children’s Automatic Speech Recognition (ASR) system for the first shared task on ASR for English non-native children’s speech. The acoustic model comprises 6 Convolutional Neural Network (CNN) layers and 12 Factored Time-Delay Neural Network (TDNN-F) layers, trained by data from 5 different children’s speech corpora. Speed perturbation, Room Impulse Response (RIR), babble noise and non-speech noise data augmentation methods were utilized to enhance the model robustness. Three Language Models (LMs) were employed: an in-domain LM trained on written data and speech transcriptions of non-native children, a LM trained on non-native written data and transcription of both native and non-native children’s speech and a TEDLIUM LM trained on adult TED talks transcriptions. Lattices produced from the different ASR systems were combined and decoded using the Minimum Bayes-Risk (MBR) decoding algorithm to get the final output. Our system achieved a final Word Error Rate (WER) of 17.55% and 16.59% for both developing and testing sets respectively and ranked second among the 10 teams participating in the task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhuxin Chen|AUTHOR Zhuxin Chen]], [[Yue Lin|AUTHOR Yue Lin]]
</p><p class="cpabstractcardaffiliationlist">NetEase, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 726–730&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, the pipeline consisting of an x-vector speaker embedding front-end and a Probabilistic Linear Discriminant Analysis (PLDA) back-end has achieved state-of-the-art results in text-independent speaker verification. In this paper, we further improve the performance of x-vector and PLDA based system for text-dependent speaker verification by exploring the choice of layer to produce embedding and modifying the back-end training strategies. In particular, we probe that x-vector based embeddings, specifically the standard deviation statistics in the pooling layer, contain the information related to both speaker characteristics and spoken content. Accordingly, we modify the back-end training labels by utilizing both of the speaker-id and phrase-id. A correlation-alignment-based PLDA adaptation is also adopted to make use of the text-independent labeled data during back-end training. Experimental results on the SDSVC 2020 dataset show that our proposed methods achieve significant performance improvement compared with the x-vector and HMM based i-vector baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hossein Zeinali|AUTHOR Hossein Zeinali]]^^1^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^2^^, [[Jahangir Alam|AUTHOR Jahangir Alam]]^^3^^, [[Lukáš Burget|AUTHOR Lukáš Burget]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amirkabir University of Technology, Iran; ^^2^^A*STAR, Singapore; ^^3^^CRIM, Canada; ^^4^^Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 731–735&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern approaches to speaker verification represent speech utterances as fixed-length embeddings. With these approaches, we implicitly assume that speaker characteristics are independent of the spoken content. Such an assumption generally holds when sufficiently long utterances are given. In this context, speaker embeddings, like i-vector and x-vector, have shown to be extremely effective. For speech utterances of short duration (in the order of a few seconds), speaker embeddings have shown significant dependency on the phonetic content. In this regard, the //SdSV Challenge 2020// was organized with a broad focus on systematic benchmark and analysis on varying degrees of phonetic variability on short-duration speaker verification (SdSV). In addition to text-dependent and text-independent tasks, the challenge features an unusual and difficult task of cross-lingual speaker verification (English vs. Persian). This paper describes the dataset and tasks, the evaluation rules and protocols, the performance metric, baseline systems, and challenge results. We also present insights gained from the evaluation and future research directions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tao Jiang|AUTHOR Tao Jiang]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 736–740&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present our XMUSPEECH system for Task 1 in the Short-duration Speaker Verification (SdSV) Challenge. In this challenge, Task 1 is a Text-Dependent (TD) mode where speaker verification systems are required to automatically determine whether a test segment with specific phrase belongs to the target speaker. We leveraged the system pipeline from three aspects, including the data processing, front-end training and back-end processing. In addition, we have explored some training strategies such as spectrogram augmentation and transfer learning. The experimental results show that the attempts we had done are effective and our best single system, a transferred model with spectrogram augmentation and attentive statistic pooling, significantly outperforms the official baseline on both progress subset and evaluation subset. Finally, a fusion of seven subsystems are chosen as our primary system which yielded 0.0856 and 0.0862 in term of minDCF, for the progress subset and evaluation subset respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sung Hwan Mun|AUTHOR Sung Hwan Mun]], [[Woo Hyun Kang|AUTHOR Woo Hyun Kang]], [[Min Hyun Han|AUTHOR Min Hyun Han]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 741–745&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes our submission to Task 1 of the Short-duration Speaker Verification (SdSV) challenge 2020. Task 1 is a text-dependent speaker verification task, where both the speaker and phrase are required to be verified. The submitted systems were composed of TDNN-based and ResNet-based front-end architectures, in which the frame-level features were aggregated with various pooling methods (e.g., statistical, self-attentive, ghostVLAD pooling). Although the conventional pooling methods provide embeddings with a sufficient amount of speaker-dependent information, our experiments show that these embeddings often lack phrase-dependent information. To mitigate this problem, we propose a new pooling and score compensation methods that leverage a CTC-based automatic speech recognition (ASR) model for taking the lexical content into account. Both methods showed improvement over the conventional techniques, and the best performance was achieved by fusing all the experimented systems, which showed 0.0785% MinDCF and 2.23% EER on the challenge’s evaluation subset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tanel Alumäe|AUTHOR Tanel Alumäe]], [[Jörgen Valk|AUTHOR Jörgen Valk]]
</p><p class="cpabstractcardaffiliationlist">TalTech, Estonia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 746–750&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the Tallinn University of Technology systems submitted to the Short-duration Speaker Verification Challenge 2020. The challenge consists of two tasks, focusing on text-dependent and text-independent speaker verification with some cross-lingual aspects. We used speaker embedding models that consist of squeeze-and-attention based residual layers, multi-head attention and either cross-entropy-based or additive angular margin based objective function. In order to encourage the model to produce language-independent embeddings, we trained the models in a multi-task manner, using dataset specific output layers. In the text-dependent task we employed a phrase classifier to reject trials with non-matching phrases. In the text-independent task we used a language classifier to boost the scores of trials where the language of the test and enrollment utterances does not match. Our final primary metric score was 0.075 in Task 1 (ranked as 6th) and 0.118 in Task 2 (rank 8).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peng Shen|AUTHOR Peng Shen]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 751–755&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we describe the NICT speaker verification system for the text-independent task of the short-duration speaker verification (SdSV) challenge 2020. We firstly present the details of the training data and feature preparation. Then, x-vector-based front-ends by considering different network configurations, back-ends of probabilistic linear discriminant analysis (PLDA), simplified PLDA, cosine similarity, and neural network-based PLDA are investigated and explored. Finally, we apply a greedy fusion and calibration approach to select and combine the subsystems. To improve the performance of the speaker verification system on short-duration evaluation data, we introduce our investigations on how to reduce the duration mismatch between training and test datasets. Experimental results showed that our primary fusion yielded minDCF of 0.074 and EER of 1.50 on the evaluation subset, which was the 2nd best result in the text-independent speaker verification task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jenthe Thienpondt|AUTHOR Jenthe Thienpondt]], [[Brecht Desplanques|AUTHOR Brecht Desplanques]], [[Kris Demuynck|AUTHOR Kris Demuynck]]
</p><p class="cpabstractcardaffiliationlist">Ghent University, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 756–760&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we describe the top-scoring IDLab submission for the text-independent task of the Short-duration Speaker Verification (SdSV) Challenge 2020. The main difficulty of the challenge exists in the large degree of varying phonetic overlap between the potentially cross-lingual trials, along with the limited availability of in-domain DeepMine Farsi training data. We introduce domain-balanced hard prototype mining to finetune the state-of-the-art ECAPA-TDNN x-vector based speaker embedding extractor. The sample mining technique efficiently exploits speaker distances between the speaker prototypes of the popular AAM-softmax loss function to construct challenging training batches that are balanced on the domain-level. To enhance the scoring of cross-lingual trials, we propose a language-dependent s-norm score normalization. The imposter cohort only contains data from the Farsi target-domain which simulates the enrollment data always being Farsi. In case a Gaussian-Backend language model detects the test speaker embedding to contain English, a cross-language compensation offset determined on the AAM-softmax speaker prototypes is subtracted from the maximum expected imposter mean score. A fusion of five systems with minor topological tweaks resulted in a final MinDCF and EER of 0.065 and 1.45% respectively on the SdSVC evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alicia Lozano-Diez|AUTHOR Alicia Lozano-Diez]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Bhargav Pulugundla|AUTHOR Bhargav Pulugundla]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]], [[Ondvrej Novotný|AUTHOR Ondvrej Novotný]], [[Pavel Matějka|AUTHOR Pavel Matějka]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 761–765&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the winning BUT submission for the text-dependent task of the SdSV challenge 2020. Given the large amount of training data available in this challenge, we explore successful techniques from text-independent systems in the text-dependent scenario. In particular, we trained x-vector extractors on both in-domain and out-of-domain datasets and combine them with i-vectors trained on concatenated MFCCs and bottleneck features, which have proven effective for the text-dependent scenario. Moreover, we proposed the use of phrase-dependent PLDA backend for scoring and its combination with a simple phrase recognizer, which brings up to 63% relative improvement on our development set with respect to using standard PLDA. Finally, we combine our different i-vector and x-vector based systems using a simple linear logistic regression score level fusion, which provides 28% relative improvement on the evaluation set with respect to our best single system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vijay Ravi|AUTHOR Vijay Ravi]], [[Ruchao Fan|AUTHOR Ruchao Fan]], [[Amber Afshan|AUTHOR Amber Afshan]], [[Huanhua Lu|AUTHOR Huanhua Lu]], [[Abeer Alwan|AUTHOR Abeer Alwan]]
</p><p class="cpabstractcardaffiliationlist">University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 766–770&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel way of addressing text-dependent automatic speaker verification (TD-ASV) by using a shared-encoder with task-specific decoders. An autoregressive predictive coding (APC) encoder is pre-trained in an unsupervised manner using both out-of-domain (LibriSpeech, VoxCeleb) and in-domain (DeepMine) unlabeled datasets to learn generic, high-level feature representation that encapsulates speaker and phonetic content. Two task-specific decoders were trained using labeled datasets to classify speakers (SID) and phrases (PID). Speaker embeddings extracted from the SID decoder were scored using a PLDA. SID and PID systems were fused at the score level. There is a 51.9% relative improvement in minDCF for our system compared to the fully supervised x-vector baseline on the cross-lingual DeepMine dataset. However, the i-vector/HMM method outperformed the proposed APC encoder-decoder system. A fusion of the x-vector/PLDA baseline and the SID/PLDA scores prior to PID fusion further improved performance by 15% indicating complementarity of the proposed approach to the x-vector system. We show that the proposed approach can leverage from large, unlabeled, data-rich domains, and learn speech patterns independent of downstream tasks. Such a system can provide competitive performance in domain-mismatched scenarios where test data is from data-scarce domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel Michelsanti|AUTHOR Daniel Michelsanti]]^^1^^, [[Olga Slizovskaia|AUTHOR Olga Slizovskaia]]^^2^^, [[Gloria Haro|AUTHOR Gloria Haro]]^^2^^, [[Emilia Gómez|AUTHOR Emilia Gómez]]^^2^^, [[Zheng-Hua Tan|AUTHOR Zheng-Hua Tan]]^^1^^, [[Jesper Jensen|AUTHOR Jesper Jensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalborg University, Denmark; ^^2^^Universitat Pompeu Fabra, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3530–3534&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Both acoustic and visual information influence human perception of speech. For this reason, the lack of audio in a video sequence determines an extremely low speech intelligibility for untrained lip readers. In this paper, we present a way to synthesise speech from the silent video of a talker using deep learning. The system learns a mapping function from raw video frames to acoustic features and reconstructs the speech with a vocoder synthesis algorithm. To improve speech reconstruction performance, our model is also trained to predict text information in a multi-task learning fashion and it is able to simultaneously reconstruct and recognise speech in real time. The results in terms of estimated speech quality and intelligibility show the effectiveness of our method, which exhibits an improvement over existing video-to-speech approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Vainer|AUTHOR Jan Vainer]], [[Ondřej Dušek|AUTHOR Ondřej Dušek]]
</p><p class="cpabstractcardaffiliationlist">Charles University, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3575–3579&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While recent neural sequence-to-sequence models have greatly improved the quality of speech synthesis, there has not been a system capable of fast training, fast inference and high-quality audio synthesis at the same time. We propose a student-teacher network capable of high-quality faster-than-real-time spectrogram synthesis, with low requirements on computational resources and fast training time. We show that self-attention layers are not necessary for generation of high quality audio. We utilize simple convolutional blocks with residual connections in both student and teacher networks and use only a single attention layer in the teacher model. Coupled with a MelGAN vocoder, our model’s voice quality was rated significantly higher than Tacotron 2. Our model can be efficiently trained on a single GPU and can run in real time even on a CPU. We provide both our source code and audio samples in our GitHub repository.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]]^^1^^, [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]]^^1^^, [[Takuma Okamoto|AUTHOR Takuma Okamoto]]^^2^^, [[Hisashi Kawai|AUTHOR Hisashi Kawai]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya University, Japan; ^^2^^NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3535–3539&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a parallel WaveGAN (PWG)-like neural vocoder with a quasi-periodic (QP) architecture to improve the pitch controllability of PWG. PWG is a compact non-autoregressive (non-AR) speech generation model, whose generative speed is much faster than real time. While utilizing PWG as a vocoder to generate speech on the basis of acoustic features such as spectral and prosodic features, PWG generates high-fidelity speech. However, when the input acoustic features include unseen pitches, the pitch accuracy of PWG-generated speech degrades because of the fixed and generic network of PWG without prior knowledge of speech periodicity. The proposed QPPWG adopts a pitch-dependent dilated convolution network (PDCNN) module, which introduces the pitch information into PWG via the dynamically changed network architecture, to improve the pitch controllability and speech modeling capability of vanilla PWG. Both objective and subjective evaluation results show the higher pitch accuracy and comparable speech quality of QPPWG-generated speech when the QPPWG model size is only 70% of that of vanilla PWG.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]]^^1^^, [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]]^^1^^, [[Kazuki Yasuhara|AUTHOR Kazuki Yasuhara]]^^1^^, [[Noriyuki Matsunaga|AUTHOR Noriyuki Matsunaga]]^^2^^, [[Yamato Ohtani|AUTHOR Yamato Ohtani]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya University, Japan; ^^2^^AI, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3540–3544&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, the effectiveness of text-to-speech (TTS) systems combined with neural vocoders to generate high-fidelity speech has been shown. However, collecting the required training data and building these advanced systems from scratch are time and resource consuming. An economical approach is to develop a neural vocoder to enhance the speech generated by existing or low-cost TTS systems. Nonetheless, this approach usually suffers from two issues: 1) temporal mismatches between TTS and natural waveforms and 2) acoustic mismatches between training and testing data. To address these issues, we adopt a cyclic voice conversion (VC) model to generate temporally matched pseudo-VC data for training and acoustically matched enhanced data for testing the neural vocoders. Because of the generality, this framework can be applied to arbitrary TTS systems and neural vocoders. In this paper, we apply the proposed method with a state-of-the-art WaveNet vocoder for two different basic TTS systems, and both objective and subjective experimental results confirm the effectiveness of the proposed framework.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hyun-Wook Yoon|AUTHOR Hyun-Wook Yoon]], [[Sang-Hoon Lee|AUTHOR Sang-Hoon Lee]], [[Hyeong-Rae Noh|AUTHOR Hyeong-Rae Noh]], [[Seong-Whan Lee|AUTHOR Seong-Whan Lee]]
</p><p class="cpabstractcardaffiliationlist">Korea University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3545–3549&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent works, a flow-based neural vocoder has shown significant improvement in real-time speech generation task. The sequence of invertible flow operations allows the model to convert samples from simple distribution to audio samples. However, training a continuous density model on discrete audio data can degrade model performance due to the topological difference between latent and actual distribution. To resolve this problem, we propose audio dequantization methods in flow-based neural vocoder for high fidelity audio generation. Data dequantization is a well-known method in image generation but has not yet been studied in the audio domain. For this reason, we implement various audio dequantization methods in flow-based neural vocoder and investigate the effect on the generated audio. We conduct various objective performance assessments and subjective evaluation to show that audio dequantization can improve audio generation quality. From our experiments, using audio dequantization produces waveform audio with better harmonic structure and fewer digital artifacts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manish Sharma|AUTHOR Manish Sharma]], [[Tom Kenter|AUTHOR Tom Kenter]], [[Rob Clark|AUTHOR Rob Clark]]
</p><p class="cpabstractcardaffiliationlist">Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3550–3554&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, WaveNet has become a popular choice of neural network to synthesize speech audio. Autoregressive WaveNet is capable of producing high-fidelity audio, but is too slow for real-time synthesis. As a remedy, Parallel WaveNet was proposed, which can produce audio faster than real time through distillation of an autoregressive teacher into a feedforward student network. A shortcoming of this approach, however, is that a large amount of recorded speech data is required to produce high-quality student models, and this data is not always available. In this paper, we propose StrawNet: a self-training approach to train a Parallel WaveNet. Self-training is performed using the synthetic examples generated by the autoregressive WaveNet teacher. We show that, in low-data regimes, training on high-fidelity synthetic data from an autoregressive teacher model is superior to training the student model on (much fewer) examples of recorded speech. We compare StrawNet to a baseline Parallel WaveNet, using both side-by-side tests and Mean Opinion Score evaluations. To our knowledge, synthetic speech has not been used to train neural text-to-speech before.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Cui|AUTHOR Yang Cui]], [[Xi Wang|AUTHOR Xi Wang]], [[Lei He|AUTHOR Lei He]], [[Frank K. Soong|AUTHOR Frank K. Soong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3555–3559&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>LPCNet neural vocoder and its variants have shown the ability to synthesize high-quality speech in small footprint by exploiting domain knowledge in speech. In this paper, we introduce subband linear prediction in LPCNet for producing high fidelity speech more efficiently with consideration of subband correlation. Speech is decomposed into multiple subband signals with linear prediction to reduce the complexity of neural vocoder. A novel subband-based autoregressive model is proposed to learn the joint distribution of the subband sequences by introducing a reasonable assumption, which keeps the dependence between subbands while accelerating the inference speed. Based upon the human auditory perception sensitivity to the harmonic speech components in the baseband, we allocate more computational resources to model the low-frequency subband to synthesize natural phase and magnitude of the synthesized speech. Both objective and subjective tests show the proposed subband LPCNet neural vocoder can synthesize higher quality speech than the original fullband one (MOS 4.62 vs. 4.54), at a rate nearly three times faster.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Ai|AUTHOR Yang Ai]]^^1^^, [[Xin Wang|AUTHOR Xin Wang]]^^2^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^2^^, [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3560–3564&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a reverberation module for source-filter-based neural vocoders that improves the performance of reverberant effect modeling. This module uses the output waveform of neural vocoders as an input and produces a reverberant waveform by convolving the input with a room impulse response (RIR). We propose two approaches to parameterizing and estimating the RIR. The first approach assumes a global time-invariant (GTI) RIR and directly learns the values of the RIR on a training dataset. The second approach assumes an utterance-level time-variant (UTV) RIR, which is invariant within one utterance but varies across utterances, and uses another neural network to predict the RIR values. We add the proposed reverberation module to the phase spectrum predictor (PSP) of a HiNet vocoder and jointly train the model. Experimental results demonstrate that the proposed module was helpful for modeling the reverberation effect and improving the perceived quality of generated reverberant speech. The UTV-RIR was shown to be more robust than the GTI-RIR to unknown reverberation conditions and achieved a perceptually better reverberation effect.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravichander Vipperla|AUTHOR Ravichander Vipperla]]^^1^^, [[Sangjun Park|AUTHOR Sangjun Park]]^^2^^, [[Kihyun Choo|AUTHOR Kihyun Choo]]^^2^^, [[Samin Ishtiaq|AUTHOR Samin Ishtiaq]]^^1^^, [[Kyoungbo Min|AUTHOR Kyoungbo Min]]^^2^^, [[Sourav Bhattacharya|AUTHOR Sourav Bhattacharya]]^^1^^, [[Abhinav Mehrotra|AUTHOR Abhinav Mehrotra]]^^1^^, [[Alberto Gil C.P. Ramos|AUTHOR Alberto Gil C.P. Ramos]]^^1^^, [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, UK; ^^2^^Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3565–3569&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>LPCNet is an efficient vocoder that combines linear prediction and deep neural network modules to keep the computational complexity low. In this work, we present two techniques to further reduce it’s complexity, aiming for a low-cost LPCNet vocoder-based neural Text-to-Speech (TTS) System. These techniques are: 1) Sample-bunching, which allows LPCNet to generate more than one audio sample per inference; and 2) Bit-bunching, which reduces the computations in the final layer of LPCNet. With the proposed bunching techniques, LPCNet, in conjunction with a Deep Convolutional TTS (DCTTS) acoustic model, shows a 2.19× improvement over the baseline run-time when running on a mobile device, with a less than 0.1 decrease in TTS mean opinion score (MOS).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eunwoo Song|AUTHOR Eunwoo Song]]^^1^^, [[Min-Jae Hwang|AUTHOR Min-Jae Hwang]]^^2^^, [[Ryuichi Yamamoto|AUTHOR Ryuichi Yamamoto]]^^3^^, [[Jin-Seob Kim|AUTHOR Jin-Seob Kim]]^^1^^, [[Ohsung Kwon|AUTHOR Ohsung Kwon]]^^1^^, [[Jae-Min Kim|AUTHOR Jae-Min Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^Search Solutions, Korea; ^^3^^LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3570–3574&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a modeling-by-generation (MbG) excitation vocoder for a neural text-to-speech (TTS) system. Recently proposed neural excitation vocoders can realize qualified waveform generation by combining a vocal tract filter with a WaveNet-based glottal excitation generator. However, when these vocoders are used in a TTS system, the quality of synthesized speech is often degraded owing to a mismatch between training and synthesis steps. Specifically, the vocoder is separately trained from an acoustic model front-end. Therefore, estimation errors of the acoustic model are inevitably boosted throughout the synthesis process of the vocoder backend. To address this problem, we propose to incorporate an MbG structure into the vocoder’s training process. In the proposed method, the excitation signal is extracted by the acoustic model’s generated spectral parameters, and the neural vocoder is then optimized not only to learn the target excitation’s distribution but also to compensate for the estimation errors occurring from the acoustic model. Furthermore, as the generated spectral parameters are shared in the training and synthesis steps, their mismatch conditions can be reduced effectively. The experimental results verify that the proposed system provides high-quality synthetic speech by achieving a mean opinion score of 4.57 within the TTS framework.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziteng Wang|AUTHOR Ziteng Wang]]^^1^^, [[Yueyue Na|AUTHOR Yueyue Na]]^^1^^, [[Zhang Liu|AUTHOR Zhang Liu]]^^1^^, [[Yun Li|AUTHOR Yun Li]]^^2^^, [[Biao Tian|AUTHOR Biao Tian]]^^1^^, [[Qiang Fu|AUTHOR Qiang Fu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, China; ^^2^^Alibaba Group, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3925–3929&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel semi-blind source separation approach for speech dereverberation. Based on a time independence assumption of the clean speech signals, direct sound and late reverberation are treated as separate sources and are separated using the auxiliary function based independent component analysis (Aux-ICA) algorithm. We show that the dereverberation performance is closely related to the underlying source probability density prior and the proposed approach generalizes to the popular weighted prediction error (WPE) algorithm, if the direct sound follows a Gaussian distribution with time-varying variances. The efficacy of the proposed approach is fully validated by speech quality and speech recognition experiments conducted on the REVERB Challenge dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jung-Hee Kim|AUTHOR Jung-Hee Kim]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]
</p><p class="cpabstractcardaffiliationlist">Hanyang University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3969–3973&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a Wave-U-Net based acoustic echo cancellation (AEC) with an attention mechanism is proposed to jointly suppress acoustic echo and background noise. The proposed approach consists of the Wave-U-Net, an auxiliary encoder, and an attention network. In the proposed approach, the Wave-U-Net yields the estimated near-end speech from the mixture, the auxiliary encoder extracts the latent features of the far-end speech, among which the relevant features are provided to the Wave-U-Net by using the attention mechanism. With the attention network, the echo can be effectively suppressed from the mixture. Experimental results on TIMIT dataset show that the proposed approach outperforms the existing methods in terms of the echo return loss enhancement (ERLE) for the single-talk period and the perceptual evaluation of speech quality (PESQ) score for the double-talk period. Furthermore, the robustness of the proposed approach against unseen noise condition is also validated from the experimental results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joon-Young Yang|AUTHOR Joon-Young Yang]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]
</p><p class="cpabstractcardaffiliationlist">Hanyang University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3930–3934&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we propose a neural-network-based virtual acoustic channel expansion (VACE) framework for weighted prediction error (WPE)-based speech dereverberation. Specifically, for the situation in which only a single microphone observation is available, we aim to build a neural network capable of generating a virtual signal that can be exploited as the secondary input for the dual-channel WPE algorithm, thus making its dereverberation performance superior to the single-channel WPE. To implement the VACE-WPE, the neural network for the VACE is initialized and integrated to the pre-trained neural WPE algorithm. The entire system is then trained in a supervised manner to output a dereverberated signal that is close to the oracle early arriving speech. Experimental results show that the proposed VACE-WPE method outperforms the single-channel WPE in a real room impulse response shortening task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vinay Kothapally|AUTHOR Vinay Kothapally]]^^1^^, [[Wei Xia|AUTHOR Wei Xia]]^^1^^, [[Shahram Ghorbani|AUTHOR Shahram Ghorbani]]^^1^^, [[John H.L. Hansen|AUTHOR John H.L. Hansen]]^^1^^, [[Wei Xue|AUTHOR Wei Xue]]^^2^^, [[Jing Huang|AUTHOR Jing Huang]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^JD.com, China; ^^3^^JD.com, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3935–3939&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The reliability of using fully convolutional networks (FCNs) has been successfully demonstrated by recent studies in many speech applications. One of the most popular variants of these FCNs is the ‘U-Net’, which is an encoder-decoder network with skip connections. In this study, we propose ‘SkipConvNet’ where we replace each skip connection with multiple convolutional modules to provide decoder with intuitive feature maps rather than encoder’s output to improve the learning capacity of the network. We also propose the use of optimal smoothing of power spectral density (PSD) as a pre-processing step, which helps to further enhance the efficiency of the network. To evaluate our proposed system, we use the REVERB challenge corpus to assess the performance of various enhancement approaches under the same conditions. We focus solely on monitoring improvements in speech quality and their contribution to improving the efficiency of back-end speech systems, such as speech recognition and speaker verification, trained on only clean speech. Experimental findings show that the proposed system consistently outperforms other approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chenggang Zhang|AUTHOR Chenggang Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Inner Mongolia University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3940–3944&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic echo cancellation (AEC) is used to cancel feedback between a loudspeaker and a microphone. Ideally, AEC is a linear problem and can be solved by adaptive filtering. However, in practice, two important problems severely affect the performance of AEC, i.e. 1) double-talk problem and 2) nonlinear distortion mainly caused by loudspeakers and/or power amplifiers. Considering these two problems in AEC, we propose a novel cascaded AEC which integrates adaptive filtering and deep learning. Specifically, two long short-term memory networks (LSTM) are employed for double-talk detection (DTD) and nonlinearity modeling, respectively. The adaptive filtering is employed to remove the linear part of echo. Experimental results show that the proposed method outperforms conventional methods in terms of the objective evaluation metrics by a considerable margin in the matched scenario. Moreover, the proposed method has much better generalization ability in the unmatched scenarios, compared with end-to-end deep learning method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Zhang|AUTHOR Yi Zhang]], [[Chengyun Deng|AUTHOR Chengyun Deng]], [[Shiqian Ma|AUTHOR Shiqian Ma]], [[Yongtao Sha|AUTHOR Yongtao Sha]], [[Hui Song|AUTHOR Hui Song]], [[Xiangang Li|AUTHOR Xiangang Li]]
</p><p class="cpabstractcardaffiliationlist">DiDi Chuxing, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3945–3949&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Generative adversarial networks (GANs) have become a popular research topic in speech enhancement like noise suppression. By training the noise suppression algorithm in an adversarial scenario, GAN based solutions often yield good performance. In this paper, a convolutional recurrent GAN architecture (CRGAN-EC) is proposed to address both linear and nonlinear echo scenarios. The proposed architecture is trained in frequency domain and predicts the time-frequency (TF) mask for the target speech. Several metric loss functions are deployed and their influence on echo cancellation performance is studied. Experimental results suggest that the proposed method outperforms the existing methods for unseen speakers in terms of echo return loss enhancement (ERLE) and perceptual evaluation of speech quality (PESQ). Moreover, multiple metric loss functions provide more freedom to achieve specific goals, e.g., more echo suppression or less distortion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lukas Pfeifenberger|AUTHOR Lukas Pfeifenberger]], [[Franz Pernkopf|AUTHOR Franz Pernkopf]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Graz, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3950–3954&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The acoustic front-end of hands-free communication devices introduces a variety of distortions to the linear echo path between the loudspeaker and the microphone. While the amplifiers may introduce a memory-less non-linearity, mechanical vibrations transmitted from the loudspeaker to the microphone via the housing of the device introduce non-linearities with memory, which are much harder to compensate. These distortions significantly limit the performance of linear Acoustic Echo Cancellation (AEC) algorithms. While there already exists a wide range of Residual Echo Suppressor (RES) techniques for individual use cases, our contribution specifically aims at a low-resource implementation that is also real-time capable. The proposed approach is based on a small Recurrent Neural Network (RNN) which adds memory to the residual echo suppressor, enabling it to compensate both types of non-linear distortions. We evaluate the performance of our system in terms of Echo Return Loss Enhancement (ERLE), Signal to Distortion Ratio (SDR) and Word Error Rate (WER), obtained during realistic double-talk situations. Further, we compare the postfilter against a state-of-the-art implementation. Finally, we analyze the numerical complexity of the overall system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Gao|AUTHOR Yi Gao]]^^1^^, [[Ian Liu|AUTHOR Ian Liu]]^^2^^, [[J. Zheng|AUTHOR J. Zheng]]^^1^^, [[Cheng Luo|AUTHOR Cheng Luo]]^^1^^, [[Bin Li|AUTHOR Bin Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, China; ^^2^^Amazon, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3955–3958&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As stereophonic audio devices, such as smart speakers and cellphones, evolve to be daily essentials, stereophonic acoustic echo cancellation becomes more important for voice and audio applications. The cross-correlation between the far-end channels and the associated ambiguity in the estimated echo path transfer functions lead the misalignment and instability issues with conventional stereophonic acoustic echo cancellers (SAEC). In this paper, we propose a novel SAEC algorithm, which can better model the acoustic echo path between each loudspeaker and microphone. Specifically, filter adaptations are modeled independently by applying pre-whitening in solving the misalignment problem. Improvement in echo suppression capability is evaluated in terms of echo return loss enhancement(ERLE) and wakeup word detection accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongsheng Chen|AUTHOR Hongsheng Chen]], [[Teng Xiang|AUTHOR Teng Xiang]], [[Kai Chen|AUTHOR Kai Chen]], [[Jing Lu|AUTHOR Jing Lu]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3959–3963&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic echo cannot be entirely removed by linear adaptive filters due to the nonlinear relationship between the echo and far-end signal. Usually a post processing module is required to further suppress the echo. In this paper, we propose a residual echo suppression method based on the modification of fully convolutional time-domain audio separation network (Conv-TasNet). Both the residual signal of the linear acoustic echo cancellation system, and the output of the adaptive filter are adopted to form multiple streams for the Conv-TasNet, resulting in more effective echo suppression while keeping a lower latency of the whole system. Simulation results validate the efficacy of the proposed method in both single-talk and double-talk situations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenzhi Fan|AUTHOR Wenzhi Fan]], [[Jing Lu|AUTHOR Jing Lu]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3964–3968&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, a partitioned-block-based frequency-domain Kalman filter (PFKF) has been proposed for acoustic echo cancellation. Compared with the normal frequency-domain Kalman filter, the PFKF utilizes the partitioned-block structure, resulting in both fast convergence and low time-latency. We present an analysis of the steady-state behavior of the PFKF and found that it suffers from a biased steady-state solution when the filter is of deficient length. Accordingly, we propose an effective modification that has the benefit of the guaranteed optimal steady-state behavior. Simulations are conducted to validate the improved performance of the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zexin Cai|AUTHOR Zexin Cai]]^^1^^, [[Chuxiong Zhang|AUTHOR Chuxiong Zhang]]^^2^^, [[Ming Li|AUTHOR Ming Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Duke University, USA; ^^2^^Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3974–3978&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>High-fidelity speech can be synthesized by end-to-end text-to-speech models in recent years. However, accessing and controlling speech attributes such as speaker identity, prosody, and emotion in a text-to-speech system remains a challenge. This paper presents a system involving feedback constraints for multispeaker speech synthesis. We manage to enhance the knowledge transfer from the speaker verification to the speech synthesis by engaging the speaker verification network. The constraint is taken by an added loss related to the speaker identity, which is centralized to improve the speaker similarity between the synthesized speech and its natural reference audio. The model is trained and evaluated on publicly available datasets. Experimental results, including visualization on speaker embedding space, show significant improvement in terms of speaker identity cloning in the spectrogram level. In addition, synthesized samples are available online for listening.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jason Fong|AUTHOR Jason Fong]], [[Jason Taylor|AUTHOR Jason Taylor]], [[Simon King|AUTHOR Simon King]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4019–4023&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Accurate pronunciation is an essential requirement for text-to-speech (TTS) systems. Systems trained on raw text exhibit pronunciation errors in output speech due to ambiguous letter-to-sound relations. Without an intermediate phonemic representation, it is difficult to intervene and correct these errors. Retaining explicit control over pronunciation runs counter to the current drive toward end-to-end (E2E) TTS using sequence-to-sequence models. On the one hand, E2E TTS aims to eliminate manual intervention, especially expert skill such as phonemic transcription of words in a lexicon. On the other, a system making difficult-to-correct pronunciation errors is of little practical use. Some intervention is necessary. We explore the minimal amount of linguistic features required to correct pronunciation errors in an otherwise E2E TTS system that accepts graphemic input. We use representation-mixing: within each sequence the system accepts either graphemic and/or phonemic input. We quantify how little training data needs to be phonemically labelled — that is, how small a lexicon must be written — to ensure control over pronunciation. We find modest correction is possible with 500 phonemised word types from the LJ speech dataset but correction works best when the majority of word types are phonemised with syllable boundaries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingjian Chen|AUTHOR Mingjian Chen]]^^1^^, [[Xu Tan|AUTHOR Xu Tan]]^^2^^, [[Yi Ren|AUTHOR Yi Ren]]^^3^^, [[Jin Xu|AUTHOR Jin Xu]]^^4^^, [[Hao Sun|AUTHOR Hao Sun]]^^1^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Tao Qin|AUTHOR Tao Qin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^Microsoft, China; ^^3^^Zhejiang University, China; ^^4^^Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4024–4028&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer-based text to speech (TTS) model (e.g., Transformer TTS [1], FastSpeech [2]) has shown the advantages of training and inference efficiency over RNN-based model (e.g., Tacotron [3]) due to its parallel computation in training and/or inference. However, the parallel computation increases the difficulty while learning the alignment between text and speech in Transformer, which is further magnified in the multi-speaker scenario with noisy data and diverse speakers, and hinders the applicability of Transformer for multi-speaker TTS. In this paper, we develop a robust and high-quality multi-speaker Transformer TTS system called MultiSpeech, with several specially designed components/techniques to improve text-to-speech alignment: 1) a diagonal constraint on the weight matrix of encoder-decoder attention in both training and inference; 2) layer normalization on phoneme embedding in encoder to better preserve position information; 3) a bottleneck in decoder pre-net to prevent copy between consecutive speech frames. Experiments on VCTK and LibriTTS multi-speaker datasets demonstrate the effectiveness of MultiSpeech: 1) it synthesizes more robust and better quality multi-speaker voice than naive Transformer based TTS; 2) with a MultiSpeech model as the teacher, we obtain a strong multi-speaker FastSpeech model with almost zero quality degradation while enjoying extremely fast inference speed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erica Cooper|AUTHOR Erica Cooper]]^^1^^, [[Cheng-I Lai|AUTHOR Cheng-I Lai]]^^2^^, [[Yusuke Yasuda|AUTHOR Yusuke Yasuda]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NII, Japan; ^^2^^MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3979–3983&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous work on speaker adaptation for end-to-end speech synthesis still falls short in speaker similarity. We investigate an orthogonal approach to the current speaker adaptation paradigms, //speaker augmentation//, by creating artificial speakers and by taking advantage of low-quality data. The base Tacotron2 model is modified to account for the channel and dialect factors inherent in these corpora. In addition, we describe a warm-start training strategy that we adopted for Tacotron2 training. A large-scale listening test is conducted, and a distance metric is adopted to evaluate synthesis of dialects. This is followed by an analysis on synthesis quality, speaker and dialect similarity, and a remark on the effectiveness of our speaker augmentation approach. Audio samples are available online¹.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tao Wang|AUTHOR Tao Wang]], [[Xuefei Liu|AUTHOR Xuefei Liu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3984–3988&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most end-to-end neural text-to-speech (TTS) systems generate acoustic features autoregressively from left to right, which still suffer from two problems: 1) low efficiency during inference; 2) the limitation of “exposure bias”. To overcome these shortcomings, this paper proposes a non-autoregressive speech synthesis model which is based on the transformer structure. During training, the ground truth of acoustic features is schedule masked. The decoder needs to predict the entire acoustic features by taking text and the masked ground truth. During inference, we just need a text as input, the network will predict the acoustic features in one step. Additionally, we decompose the decoding process into two stages so that the model can consider the information in the context. Given an input text embedding, we first generate coarse acoustic features, which focus on the meaning of sentences. Then, we fill in missing details of acoustic features by taking into account the text information and the coarse acoustic features. Experiments on a Chinese female corpus illustrate that our approach can achieve competitive results in speech naturalness relative to autoregressive model. Most importantly, our model speed up the acoustic features generation by 296× compared with the autoregressive model based on transformer structure.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tao Wang|AUTHOR Tao Wang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3989–3993&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The gap between speaker characteristics of reference speech and synthesized speech remains a challenging problem in one-shot speech synthesis. In this paper, we propose a bi-level speaker supervision framework to close the speaker characteristics gap via supervising the synthesized speech at speaker feature level and speaker identity level. The speaker feature extraction and speaker identity reconstruction are integrated in an end-to-end speech synthesis network, with the one on speaker feature level for closing speaker characteristics and the other on speaker identity level for preserving identity information. This framework guarantees that the synthesized speech has similar speaker characteristics to original speech, and it also ensures the distinguishability between different speakers. Additionally, to solve the influence of speech content on speaker feature extraction task, we propose a text-independent reference encoder (ti-reference encoder) module to extract speaker feature. Experiments on LibriTTS dataset show that our model is able to generate the speech similar to target speaker. Furthermore, we demonstrate that this model can learn meaningful speaker representations by bi-level speaker supervision and ti-reference encoder module.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alex Peiró-Lilja|AUTHOR Alex Peiró-Lilja]], [[Mireia Farrús|AUTHOR Mireia Farrús]]
</p><p class="cpabstractcardaffiliationlist">Universitat Pompeu Fabra, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3994–3998&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art end-to-end speech synthesis models have reached levels of quality close to human capabilities. However, there is still room for improvement in terms of naturalness, related to prosody, which is essential for human-machine interaction. Therefore, part of current research has shift its focus on improving this aspect with many solutions, which mainly involve prosody adaptability or control. In this work, we explored a way to include linguistic features into the sequence-to-sequence Tacotron2 system to improve the naturalness of the generated voice. That is, making the prosody of the synthesis looking more like the real human speaker. Specifically we embedded with an additional encoder part-of-speech tags and punctuation mark locations of the input text to condition Tacotron2 generation. We propose two different architectures for this parallel encoder: one based on a stack of convolutional plus recurrent layers, and another formed by a stack of bidirectional recurrent plus linear layers. To evaluate the similarity between real read-speech and synthesis, we carried out an objective test using signal processing metrics and a perceptual test. The presented results show that we achieved an improvement in naturalness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naihan Li|AUTHOR Naihan Li]]^^1^^, [[Shujie Liu|AUTHOR Shujie Liu]]^^2^^, [[Yanqing Liu|AUTHOR Yanqing Liu]]^^2^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Ming Liu|AUTHOR Ming Liu]]^^1^^, [[Ming Zhou|AUTHOR Ming Zhou]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UESTC, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3999–4003&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To speed up the inference of neural speech synthesis, non-autoregressive models receive increasing attention recently. In non-autoregressive models, additional durations of text tokens are required to make a hard alignment between the encoder and the decoder. The duration-based alignment plays a crucial role since it controls the correspondence between text tokens and spectrum frames and determines the rhythm and speed of synthesized audio. To get better duration-based alignment and improve the quality of non-autoregressive speech synthesis, in this paper, we propose a novel neural alignment model named MoBoAligner. Given the pairs of the text and mel spectrum, MoBoAligner tries to identify the boundaries of text tokens in the given mel spectrum frames based on the token-frame similarity in the neural semantic space with an end-to-end framework. With these boundaries, durations can be extracted and used in the training of non-autoregressive TTS models. Compared with the duration extracted by TransformerTTS, MoBoAligner brings improvement for the non-autoregressive TTS model on MOS (3.74 comparing to FastSpeech’s 3.44). Besides, MoBoAligner is task-specified and lightweight, which reduces the parameter number by 45% and the training time consuming by 30%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dan Lim|AUTHOR Dan Lim]], [[Won Jang|AUTHOR Won Jang]], [[Gyeonghwan O|AUTHOR Gyeonghwan O]], [[Heayoung Park|AUTHOR Heayoung Park]], [[Bongwan Kim|AUTHOR Bongwan Kim]], [[Jaesam Yoon|AUTHOR Jaesam Yoon]]
</p><p class="cpabstractcardaffiliationlist">Kakao, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4004–4008&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose Jointly trained Duration Informed Transformer (JDI-T), a feed-forward Transformer with a duration predictor jointly trained without explicit alignments in order to generate an acoustic feature sequence from an input text. In this work, inspired by the recent success of the duration informed networks such as FastSpeech and DurIAN, we further simplify its sequential, two-stage training pipeline to a single-stage training. Specifically, we extract the phoneme duration from the autoregressive Transformer on the fly during the joint training instead of pretraining the autoregressive model and using it as a phoneme duration extractor. To our best knowledge, it is the first implementation to jointly train the feed-forward Transformer without relying on a pre-trained phoneme duration extractor in a single training pipeline. We evaluate the effectiveness of the proposed model on the publicly available Korean Single speaker Speech (KSS) dataset compared to the baseline text-to-speech (TTS) models trained by ESPnet-TTS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masashi Aso|AUTHOR Masashi Aso]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4009–4013&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the use of unaligned multiple language units for end-to-end text-to-speech (TTS). End-to-end TTS is a promising technology in that it does not require intermediate representation such as prosodic contexts. However, it causes mispronunciation and unnatural prosody. To alleviate this problem, previous methods have used multiple language units, e.g., phonemes and characters, but required the units to be hard-aligned. In this paper, we propose a multi-input attention structure that simultaneously accepts multiple language units without alignments among them. We consider using not only traditional phonemes and characters but also subwords tokenized in a language-independent manner. We also propose a progressive training strategy to deal with the unaligned multiple language units. The experimental results demonstrated that our model and training strategy improve speech quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qingyun Dou|AUTHOR Qingyun Dou]], [[Joshua Efiong|AUTHOR Joshua Efiong]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4014–4018&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Auto-regressive sequence-to-sequence models with attention mechanisms have achieved state-of-the-art performance in various tasks including speech synthesis. Training these models can be difficult. The standard approach guides a model with the reference output history during training. However during synthesis the generated output history must be used. This mismatch can impact performance. Several approaches have been proposed to handle this, normally by selectively using the generated output history. To make training stable, these approaches often require a heuristic schedule or an auxiliary classifier. This paper introduces attention forcing, which guides the model with the generated output history and reference attention. This approach reduces the training-evaluation mismatch without the need for a schedule or a classifier. Additionally, for standard training approaches, the frame rate is often reduced to prevent models from copying the output history. As attention forcing does not feed the reference output history to the model, it allows using a higher frame rate, which improves the speech quality. Finally, attention forcing allows the model to generate output sequences aligned with the references, which is important for some down-stream tasks such as training neural vocoders. Experiments show that attention forcing allows doubling the frame rate, and yields significant gain in speech quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zi-qiang Zhang|AUTHOR Zi-qiang Zhang]], [[Yan Song|AUTHOR Yan Song]], [[Jian-shu Zhang|AUTHOR Jian-shu Zhang]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3580–3584&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Encoder-decoder based methods have become popular for automatic speech recognition (ASR), thanks to their simplified processing stages and low reliance on prior knowledge. However, large amounts of acoustic data with paired transcriptions is generally required to train an effective encoder-decoder model, which is expensive, time-consuming to be collected and not always readily available. However unpaired speech data is abundant, hence several semi-supervised learning methods, such as teacher-student (T/S) learning and pseudo-labeling, have recently been proposed to utilize this potentially valuable resource. In this paper, a novel T/S learning with conditional posterior distribution for encoder-decoder based ASR is proposed. Specifically, the 1-best hypotheses and the conditional posterior distribution from the teacher are exploited to provide more effective supervision. Combined with model perturbation techniques, the proposed method reduces WER by 19.2% relatively on the LibriSpeech benchmark, compared with a system trained using only paired data. This outperforms previous reported 1-best hypothesis results on the same task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Sri Garimella|AUTHOR Sri Garimella]]
</p><p class="cpabstractcardaffiliationlist">Amazon, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3585–3589&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art Acoustic Modeling (AM) techniques use long short term memory (LSTM) networks, and apply multiple phases of training on large amount of labeled acoustic data — initial cross-entropy (CE) training or connectionist temporal classification (CTC) training followed by sequence discriminative training, such as state-level Minimum Bayes Risk (sMBR). Recently, there is considerable interest in applying Semi-Supervised Learning (SSL) methods that leverage substantial amount of unlabeled speech for improving AM. This paper proposes a novel Teacher-Student based knowledge distillation (KD) approach for sequence discriminative training, where reference state sequence of unlabeled data are estimated using a strong Bi-directional LSTM Teacher model which is then used to guide the sMBR training of a LSTM Student model. We build a strong supervised LSTM AM baseline by using 45000 hours of labeled multi-dialect English data for initial CE or CTC training stage, and 11000 hours of its British English subset for sMBR training phase. To demonstrate the efficacy of the proposed approach, we leverage an additional 38000 hours of unlabeled British English data at only sMBR stage, which yields a relative Word Error Rate (WER) improvement in the range of 6%–11% over supervised baselines in clean and noisy test conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinyu Li|AUTHOR Jinyu Li]]^^1^^, [[Rui Zhao|AUTHOR Rui Zhao]]^^1^^, [[Zhong Meng|AUTHOR Zhong Meng]]^^1^^, [[Yanqing Liu|AUTHOR Yanqing Liu]]^^2^^, [[Wenning Wei|AUTHOR Wenning Wei]]^^2^^, [[Sarangarajan Parthasarathy|AUTHOR Sarangarajan Parthasarathy]]^^1^^, [[Vadim Mazalov|AUTHOR Vadim Mazalov]]^^1^^, [[Zhenghao Wang|AUTHOR Zhenghao Wang]]^^1^^, [[Lei He|AUTHOR Lei He]]^^2^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Yifan Gong|AUTHOR Yifan Gong]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Microsoft, USA; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3590–3594&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Because of its streaming nature, recurrent neural network transducer (RNN-T) is a very promising end-to-end (E2E) model that may replace the popular hybrid model for automatic speech recognition. In this paper, we describe our recent development of RNN-T models with reduced GPU memory consumption during training, better initialization strategy, and advanced encoder modeling with future lookahead. When trained with Microsoft’s 65 thousand hours of anonymized training data, the developed RNN-T model surpasses a very well trained hybrid model with both better recognition accuracy and lower latency. We further study how to customize RNN-T models to a new domain, which is important for deploying E2E models to practical scenarios. By comparing several methods leveraging text-only data in the new domain, we found that updating RNN-T’s prediction and joint networks using text-to-speech generated from domain-specific text is the most effective.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xuankai Chang|AUTHOR Xuankai Chang]]^^1^^, [[Aswin Shanmugam Subramanian|AUTHOR Aswin Shanmugam Subramanian]]^^1^^, [[Pengcheng Guo|AUTHOR Pengcheng Guo]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^1^^, [[Yuya Fujita|AUTHOR Yuya Fujita]]^^2^^, [[Motoi Omachi|AUTHOR Motoi Omachi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Yahoo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3595–3599&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformers have demonstrated state-of-the-art performance on many tasks in natural language processing and speech processing. One of the key components in Transformers is self-attention, which attends to the whole input sequence at every layer. However, the computational and memory cost of self-attention is square of the input sequence length, which is a major concern in automatic speech recognition (ASR) where the input sequence can be very long. In this paper, we propose to use a technique called adaptive span self-attention for ASR tasks, which is originally proposed for language modeling. Our method enables the network to learn an appropriate size and position of the window for each layer and head, and our newly introduced scheme can further control the window size depending on the future and past contexts. Thus, it can save both computational complexity and memory size from the square order of the input length to the adaptive linear order. We show the effectiveness of the proposed method by using several ASR tasks, and the proposed adaptive span methods consistently improved the performance from the conventional fixed span methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Egor Lakomkin|AUTHOR Egor Lakomkin]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Ilya Sklyar|AUTHOR Ilya Sklyar]], [[Simon Wiesler|AUTHOR Simon Wiesler]]
</p><p class="cpabstractcardaffiliationlist">Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3600–3604&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Subwords are the most widely used output units in end-to-end speech recognition. They combine the best of two worlds by modeling the majority of frequent words directly and at the same time allow open vocabulary speech recognition by backing off to shorter units or characters to construct words unseen during training. However, mapping text to subwords is ambiguous and often multiple segmentation variants are possible. Yet, many systems are trained using only the most likely segmentation. Recent research suggests that sampling subword segmentations during training acts as a regularizer for neural machine translation and speech recognition models, leading to performance improvements. In this work, we conduct a principled investigation on the regularizing effect of the subword segmentation sampling method for a streaming end-to-end speech recognition task. In particular, we evaluate the subword regularization contribution depending on the size of the training dataset. Our results suggest that subword regularization provides a consistent improvement of 2–8% relative word-error-rate reduction, even in a large-scale setting with datasets up to a size of 20k hours. Further, we analyze the effect of subword regularization on recognition of unseen words and its implications on beam diversity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wilfried Michel|AUTHOR Wilfried Michel]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3605–3609&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequence-to-sequence models with an implicit alignment mechanism (e.g. attention) are closing the performance gap towards traditional hybrid hidden Markov models (HMM) for the task of automatic speech recognition. One important factor to improve word error rate in both cases is the use of an external language model (LM) trained on large text-only corpora. Language model integration is straightforward with the clear separation of acoustic model and language model in classical HMM-based modeling. In contrast, multiple integration schemes have been proposed for attention models.

In this work, we present a novel method for language model integration into implicit-alignment based sequence-to-sequence models. Log-linear model combination of acoustic and language model is performed with a per-token renormalization. This allows us to compute the full normalization term efficiently both in training and in testing.

This is compared to a global renormalization scheme which is equivalent to applying shallow fusion in training.

The proposed methods show good improvements over standard model combination (shallow fusion) on our state-of-the-art Librispeech system. Furthermore, the improvements are persistent even if the LM is exchanged for a more powerful one after training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Han|AUTHOR Wei Han]], [[Zhengdong Zhang|AUTHOR Zhengdong Zhang]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Jiahui Yu|AUTHOR Jiahui Yu]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[James Qin|AUTHOR James Qin]], [[Anmol Gulati|AUTHOR Anmol Gulati]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Yonghui Wu|AUTHOR Yonghui Wu]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3610–3614&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional neural networks (CNN) have shown promising results for end-to-end speech recognition, albeit still behind RNN/transformer based models in performance. In this paper, we study how to bridge this gap and go beyond with a novel CNN-RNN-transducer architecture, which we call ContextNet. ContextNet features a fully convolutional encoder that incorporates global context information into convolution layers by adding squeeze-and-excitation modules. In addition, we propose a simple scaling method that scales the widths of ContextNet that achieves good trade-off between computation and accuracy.

We demonstrate that on the widely used Librispeech benchmark, ContextNet achieves a word error rate (WER) of 2.1%/4.6% without external language model (LM), 1.9%/4.1% with LM and 2.9%/7.0% with only 10M parameters on the clean/noisy LibriSpeech test sets. This compares to the best previously published model of 2.0%/4.6% with LM and 3.9%/11.3% with 20M parameters. The superiority of the proposed ContextNet model is also verified on a much larger internal dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[David Rybach|AUTHOR David Rybach]], [[Basi García|AUTHOR Basi García]], [[Trevor Strohman|AUTHOR Trevor Strohman]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3615–3619&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Having end-to-end (E2E) models emit the start and end times of words //on-device// is important for various applications. This unsolved problem presents challenges with respect to model size, latency and accuracy. In this paper, we present an approach to word timings by constraining the attention head of the Listen, Attend, Spell (LAS) 2nd-pass rescorer [1]. On a Voice-Search task, we show that this approach does not degrade accuracy compared to when no attention head is constrained. In addition, it meets on-device size and latency constraints. In comparison, constraining the alignment with a 1st-pass Recurrent Neural Network Transducer (RNN-T) model to emit word timings results in quality degradation. Furthermore, a low-frame-rate conventional acoustic model [2], which is trained with a constrained alignment and is used in many applications for word timings, is slower to detect start and end times compared to our proposed 2nd-pass LAS approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danni Liu|AUTHOR Danni Liu]], [[Gerasimos Spanakis|AUTHOR Gerasimos Spanakis]], [[Jan Niehues|AUTHOR Jan Niehues]]
</p><p class="cpabstractcardaffiliationlist">Universiteit Maastricht, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3620–3624&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Encoder-decoder models provide a generic architecture for sequence-to-sequence tasks such as speech recognition and translation. While offline systems are often evaluated on quality metrics like word error rates (WER) and BLEU scores, latency is also a crucial factor in many practical use-cases. We propose three latency reduction techniques for chunk-based incremental inference and evaluate their accuracy-latency tradeoff. On the 300-hour How2 dataset, we reduce latency by 83% to 0.8 second by sacrificing 1% WER (6% rel.) compared to offline transcription. Although our experiments use the Transformer, the partial hypothesis selection strategies are applicable to other encoder-decoder models. To reduce expensive re-computation as new chunks arrive, we propose to use a unidirectionally-attending encoder. After an adaptation procedure to partial sequences, the unidirectional model performs on par with the original model. We further show that our approach is also applicable to speech translation. On the How2 English-Portuguese speech translation dataset, we reduce latency to 0.7 second (-84% rel.) while incurring a loss of 2.4 BLEU points (5% rel.) compared to the offline system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ke Li|AUTHOR Ke Li]]^^1^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^2^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3625–3629&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A cache-inspired approach is proposed for neural language models (LMs) to improve long-range dependency and better predict rare words from long contexts. This approach is a simpler alternative to attention-based pointer mechanism that enables neural LMs to reproduce words from recent history. Without using attention and mixture structure, the method only involves appending extra tokens that represent words in history to the output layer of a neural LM and modifying training supervisions accordingly. A memory-augmentation unit is introduced to learn words that are particularly likely to repeat. We experiment with both recurrent neural network- and Transformer-based LMs. Perplexity evaluation on Penn Treebank and WikiText-2 shows the proposed model outperforms both LSTM and LSTM with attention-based pointer mechanism and is more effective on rare words. N-best rescoring experiments on Switchboard indicate that it benefits both very rare and frequent words. However, it is challenging for the proposed model as well as two other models with attention-based pointer mechanism to obtain good overall WER reductions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhilash Jain|AUTHOR Abhilash Jain]], [[Aku Rouhe|AUTHOR Aku Rouhe]], [[Stig-Arne Grönroos|AUTHOR Stig-Arne Grönroos]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3630–3634&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, BERT and Transformer-XL based architectures have achieved strong results in a range of NLP applications. In this paper, we explore Transformer architectures — BERT and Transformer-XL — as a language model for a Finnish ASR task with different rescoring schemes.

We achieve strong results in both an intrinsic and an extrinsic task with Transformer-XL, achieving 29% better perplexity and 3% better WER than our previous best LSTM-based approach. We also introduce a novel three-pass decoding scheme which improves the ASR performance by 8%. To the best of our knowledge, this is also the first work (i) to formulate an alpha smoothing framework to use the non-autoregressive BERT language model for an ASR task, and (ii) to explore sub-word units with Transformer-XL for an agglutinative language like Finnish.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hayato Futami|AUTHOR Hayato Futami]], [[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Shinsuke Sakai|AUTHOR Shinsuke Sakai]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3635–3639&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based sequence-to-sequence (seq2seq) models have achieved promising results in automatic speech recognition (ASR). However, as these models decode in a left-to-right way, they do not have access to context on the right. We leverage both left and right context by applying BERT as an external language model to seq2seq ASR through knowledge distillation. In our proposed method, BERT generates soft labels to guide the training of seq2seq ASR. Furthermore, we leverage context beyond the current utterance as input to BERT. Experimental evaluations show that our method significantly improves the ASR performance from the seq2seq baseline on the Corpus of Spontaneous Japanese (CSJ). Knowledge distillation from BERT outperforms that from a transformer LM that only looks at left context. We also show the effectiveness of leveraging context beyond the current utterance. Our method outperforms other LM application approaches such as n-best rescoring and shallow fusion, while it does not require extra inference cost.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Yu-Min Huang|AUTHOR Yu-Min Huang]]
</p><p class="cpabstractcardaffiliationlist">National Chiao Tung University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3640–3644&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequential learning using recurrent neural network (RNN) has been popularly developed for language modeling. An alternative sequential learning was implemented by the temporal convolutional network (TCN) which is seen as a variant of one-dimensional convolutional neural network (CNN). In general, RNN and TCN are fitted to capture the long-term and the short-term features over natural sentences, respectively. This paper is motivated to fulfill TCN as the encoder to extract short-term dependencies and then use RNN as the decoder for language modeling where the dependencies are integrated in a long-term semantic fashion for word prediction. A new sequential learning based on the convolutional recurrent network (CRN) is developed to characterize the //local dependencies// as well as the //global semantics// in word sequences. Importantly, the stochastic modeling for CRN is proposed to facilitate model capacity in neural language model where the uncertainties in training sentences are represented for variational inference. The complementary benefits of CNN and RNN are merged in sequential learning where the latent variable space is constructed as a generative model for sequential prediction. Experiments on language modeling demonstrate the effectiveness of stochastic convolutional recurrent network relative to the other sequential machines in terms of perplexity and word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingjing Huo|AUTHOR Jingjing Huo]], [[Yingbo Gao|AUTHOR Yingbo Gao]], [[Weiyue Wang|AUTHOR Weiyue Wang]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3645–3649&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To encourage intra-class compactness and inter-class separability among trainable feature vectors, large-margin softmax methods are developed and widely applied in the face recognition community. The introduction of the large-margin concept into the softmax is reported to have good properties such as enhanced discriminative power, less overfitting and well-defined geometric intuitions. Nowadays, language modeling is commonly approached with neural networks using softmax and cross entropy. In this work, we are curious to see if introducing large-margins to neural language models would improve the perplexity and consequently word error rate in automatic speech recognition. Specifically, we first implement and test various types of conventional margins following the previous works in face recognition. To address the distribution of natural language data, we then compare different strategies for word vector norm-scaling. After that, we apply the best norm-scaling setup in combination with various margins and conduct neural language models rescoring experiments in automatic speech recognition. We find that although perplexity is slightly deteriorated, neural language models with large-margin softmax can yield word error rate similar to that of the standard softmax baseline. Finally, expected margins are analyzed through visualization of word vectors, showing that the syntactic and semantic relationships are also preserved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Da-Rong Liu|AUTHOR Da-Rong Liu]]^^1^^, [[Chunxi Liu|AUTHOR Chunxi Liu]]^^2^^, [[Frank Zhang|AUTHOR Frank Zhang]]^^2^^, [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]]^^2^^, [[Yatharth Saraf|AUTHOR Yatharth Saraf]]^^2^^, [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3650–3654&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Videos uploaded on social media are often accompanied with textual descriptions. In building automatic speech recognition (ASR) systems for videos, we can exploit the contextual information provided by such video metadata. In this paper, we explore ASR lattice rescoring by selectively attending to the video descriptions. We first use an attention based method to extract contextual vector representations of video metadata, and use these representations as part of the inputs to a neural language model during lattice rescoring. Secondly, we propose a hybrid pointer network approach to explicitly interpolate the word probabilities of the word occurrences in metadata. We perform experimental evaluations on both language modeling and ASR tasks, and demonstrate that both proposed methods provide performance improvements by selectively leveraging the video metadata.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yosuke Higuchi|AUTHOR Yosuke Higuchi]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^1^^, [[Nanxin Chen|AUTHOR Nanxin Chen]]^^1^^, [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]^^2^^, [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Waseda University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3655–3659&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present Mask CTC, a novel //non-autoregressive// end-to-end automatic speech recognition (ASR) framework, which generates a sequence by refining outputs of the connectionist temporal classification (CTC). Neural sequence-to-sequence models are usually //autoregressive//: each output token is generated by conditioning on previously generated tokens, at the cost of requiring as many iterations as the output length. On the other hand, non-autoregressive models can simultaneously generate tokens within a constant number of iterations, which results in significant inference time reduction and better suits end-to-end ASR model for real-world scenarios. In this work, Mask CTC model is trained using a Transformer encoder-decoder with joint training of mask prediction and CTC. During inference, the target sequence is initialized with the greedy CTC outputs and low-confidence tokens are masked based on the CTC probabilities. Based on the conditional dependence between output tokens, these masked low-confidence tokens are then predicted conditioning on the high-confidence tokens. Experimental results on different speech recognition tasks show that Mask CTC outperforms the standard CTC model (e.g., 17.9% → 12.1% WER on WSJ) and approaches the autoregressive model, requiring much less inference time using CPUs (0.07 RTF in Python implementation). All of our codes are publicly available at https://github.com/espnet/espnet</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuya Fujita|AUTHOR Yuya Fujita]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Motoi Omachi|AUTHOR Motoi Omachi]]^^1^^, [[Xuankai Chang|AUTHOR Xuankai Chang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yahoo, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3660–3664&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end (E2E) models have gained attention in the research field of automatic speech recognition (ASR). Many E2E models proposed so far assume left-to-right autoregressive generation of an output token sequence except for connectionist temporal classification (CTC) and its variants. However, left-to-right decoding cannot consider the future output context, and it is not always optimal for ASR. One of the non-left-to-right models is known as non-autoregressive Transformer (NAT) and has been intensively investigated in the area of neural machine translation (NMT) research. One NAT model, mask-predict, has been applied to ASR but the model needs some heuristics or additional component to estimate the length of the output token sequence. This paper proposes to apply another type of NAT called insertion-based models, that were originally proposed for NMT, to ASR tasks. Insertion-based models solve the above mask-predict issues and can generate an arbitrary generation order of an output sequence. In addition, we introduce a new formulation of joint training of the insertion-based models and CTC. This formulation reinforces CTC by making it dependent on insertion-based token generation in a non-autoregressive manner. We conducted experiments on three public benchmarks and achieved competitive performance to strong autoregressive Transformer with a similar decoding condition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yefei Chen|AUTHOR Yefei Chen]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Mengyue Wu|AUTHOR Mengyue Wu]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3665–3669&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Traditional supervised voice activity detection (VAD) methods work well in clean and controlled scenarios, with performance severely degrading in real-world applications. One possible bottleneck is that speech in the wild contains unpredictable noise types, hence frame-level label prediction is difficult, which is required for traditional supervised VAD training. In contrast, we propose a general-purpose VAD (GPVAD) framework, which can be easily trained from noisy data in a weakly supervised fashion, requiring only clip-level labels. We proposed two GPVAD models, one full (GPV-F), trained on 527 Audioset sound events, and one binary (GPV-B), only distinguishing speech and noise. We evaluate the two GPV models against a CRNN based standard VAD model (VAD-C) on three different evaluation protocols (clean, synthetic noise, real data). Results show that our proposed GPV-F demonstrates competitive performance in clean and synthetic scenarios compared to traditional VAD-C. Further, in real-world evaluation, GPV-F largely outperforms VAD-C in terms of frame-level evaluation metrics as well as segment-level ones. With a much lower requirement for frame-labeled data, the naive binary clip-level GPV-B model can still achieve comparable performance to VAD-C in real-world scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[S. Limonard|AUTHOR S. Limonard]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[R.W.N.M. van Hout|AUTHOR R.W.N.M. van Hout]], [[Helmer Strik|AUTHOR Helmer Strik]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3710–3714&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Reading software based on Automatic Speech Recognition (ASR) has been proposed as a possible supplement to traditional classroom instruction to help pupils achieve the required level of reading proficiency. However, the knowledge required to develop such software is not always available, especially for languages other than English. To this end, we analyzed a corpus containing speech material from Dutch native primary school pupils who read texts aloud at their mastery reading level. We investigated reading strategies, reading miscues, a novel reading miscue index and their relationship with AVI level (reading level) and gender. We found a significant effect of AVI level on reading miscue index, but did not find a decrease of reading miscue index as AVI level increased. Pupils mostly used lexical reading strategies, which seem to increase when AVI level increases. Miscues most frequently concerned low-frequency words with at least two syllables, and omitted and inserted words were generally high frequent, unstressed function words. These results provide insights that help design the content of reading interventions and that can contribute to developing and improving ASR-based reading software. We discuss the results in view of current trends in education and technology, and their implications for future research and development.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joohyung Lee|AUTHOR Joohyung Lee]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3670–3674&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice activity detection (VAD) is a challenging task in low signal-to-noise ratio (SNR) environment, especially in non-stationary noise. To deal with this issue, we propose a novel attention module that can be integrated in Long Short-Term Memory (LSTM). Our proposed attention module refines each LSTM layer’s hidden states so as to make it possible to adaptively focus on both time and frequency domain. Experiments are conducted on various noisy conditions using Aurora 4 database. Our proposed method obtains the 95.58% area under the ROC curve (AUC), achieving 22.05%relative improvement compared to baseline, with only 2.44% increase in the number of parameters. Besides, we utilize focal loss for alleviating the performance degradation caused by imbalance between speech and non-speech sections in training sets. The results show that the focal loss can improve the performance in various imbalance situations compared to the cross entropy loss, a commonly used loss function in VAD.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tianjiao Xu|AUTHOR Tianjiao Xu]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Inner Mongolia University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3675–3679&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice activity detection (VAD) is essential for speech signal processing system, which desires low computational cost and high real-time processing. Likelihood ratio test (LRT) based VAD is a widely used and effective approach in many applications. However, it is still a challenge in low signal-to-noise ratio (SNR) and non-stationary noisy scenario. To cope with this challenge, we propose a supervised masking-based parameter estimation module with an adaptive threshold to improve the performance of a state-of-the-art LRT based VAD. Moreover, considering real-time processing, we compared the proposed with corresponding end-to-end supervised learning approaches in various model sizes. Experimental results show that the proposed method leads to consistently better performance than both of the existing LRT based method and end-to-end supervised learning based approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Avinash Kumar|AUTHOR Avinash Kumar]]^^1^^, [[S. Shahnawazuddin|AUTHOR S. Shahnawazuddin]]^^2^^, [[Waquar Ahmad|AUTHOR Waquar Ahmad]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NIT Sikkim, India; ^^2^^NIT Patna, India; ^^3^^NIT Calicut, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3680–3684&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose a novel and noise robust method for the detection of vowels in speech signals. The proposed approach combines variational mode decomposition (VMD) and non-local means (NLM) estimation for the detection of vowels in a speech sequence. The VMD algorithm is used to determine a number of variational mode functions (VMFs). The lower-order VMFs represent the frequency contents corresponding to vowel regions. Thus by combining the lower-order VMFs and reconstructing the speech signal back, the energy corresponding to the vowel regions is enhanced while the non-vowel regions are suppressed. At the same time, the ill-effect of noise is also reduced. Finally, as reported in an earlier work, application of NLM followed by convolution with first-order difference of Gaussian window is performed on the reconstructed signal to determine the vowel region. The performance of proposed approach for the task of detecting vowels in speech is compared with three existing techniques and observed to be superior under clean as well as noisy test conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marvin Lavechin|AUTHOR Marvin Lavechin]]^^1^^, [[Marie-Philippe Gill|AUTHOR Marie-Philippe Gill]]^^2^^, [[Ruben Bousbib|AUTHOR Ruben Bousbib]]^^1^^, [[Hervé Bredin|AUTHOR Hervé Bredin]]^^3^^, [[Leibny Paola Garcia-Perera|AUTHOR Leibny Paola Garcia-Perera]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LSCP (UMR 8554), France; ^^2^^UQAM, Canada; ^^3^^LIMSI (UPR 3251), France; ^^4^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3685–3689&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice activity detection is the task of detecting speech regions in a given audio stream or recording. First, we design a neural network combining trainable filters and recurrent layers to tackle voice activity detection directly from the waveform. Experiments on the challenging DIHARD dataset show that the proposed end-to-end model reaches state-of-the-art performance and outperforms a variant where trainable filters are replaced by standard cepstral coefficients. Our second contribution aims at making the proposed voice activity detection model robust to domain mismatch. To that end, a domain classification branch is added to the network and trained in an adversarial manner. The same DIHARD dataset, drawn from 11 different domains is used for evaluation under two scenarios. In the //in-domain// scenario where the training and test sets cover the exact same domains, we show that the domain-adversarial approach does not degrade performance of the proposed end-to-end model. In the //out-domain// scenario where the test domain is different from training domains, it brings a relative improvement of more than 10%. Finally, our last contribution is the provision of a fully reproducible open-source pipeline than can be easily adapted to other datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ayush Agarwal|AUTHOR Ayush Agarwal]], [[Jagabandhu Mishra|AUTHOR Jagabandhu Mishra]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]
</p><p class="cpabstractcardaffiliationlist">IIT Dharwad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3690–3694&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Vowel onset point (VOP) is the location where the onset of vowel takes place in a given speech segment. Many speech processing applications need the information of VOP to extract features from the speech signal. In such cases the overall performance largely depends on the exact detection of VOP location. There are many algorithms proposed in the literature for the automatic detection of VOPs. Most of these methods assume that the given speech signal is produced at normal speech rate. All the parameters for smoothing speech signal evidence as well as hypothesizing VOPs are set accordingly. However, these parameter settings may not work well for variable speech rate conditions. This work proposes a dynamic first order Gaussian differentiator (FOGD) window based approach to overcome this issue. The proposed approach is evaluated using a subset of TIMIT dataset with manually marked ground truth VOPs. The evaluated performance of VOP detection by using the proposed approach shows improvement when compared with the existing approach at higher and lower speech rate conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenpeng Zheng|AUTHOR Zhenpeng Zheng]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jian Luo|AUTHOR Jian Luo]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3695–3699&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice activity detection (VAD) makes a distinction between speech and non-speech and its performance is of crucial importance for speech based services. Recently, deep neural network (DNN)-based VADs have achieved better performance than conventional signal processing methods. The existed DNN-based models always handcrafted a fixed window to make use of the contextual speech information to improve the performance of VAD. However, the fixed window of contextual speech information can’t handle various unpredictable noise environments and highlight the critical speech information to VAD task. In order to solve this problem, this paper proposed an adaptive multiple receptive-field attention neural network, called MLNET, to finish VAD task. The MLNET leveraged multi-branches to extract multiple contextual speech information and investigated an effective attention block to weight the most crucial parts of the context for final classification. Experiments in real-world scenarios demonstrated that the proposed MLNET-based model outperformed other baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Kreuk|AUTHOR Felix Kreuk]]^^1^^, [[Joseph Keshet|AUTHOR Joseph Keshet]]^^1^^, [[Yossi Adi|AUTHOR Yossi Adi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Bar-Ilan University, Israel; ^^2^^Facebook, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3700–3704&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a self-supervised representation learning model for the task of unsupervised phoneme boundary detection. The model is a convolutional neural network that operates directly on the raw waveform. It is optimized to identify spectral changes in the signal using the Noise-Contrastive Estimation principle. At test time, a peak detection algorithm is applied over the model outputs to produce the final boundaries. As such, the proposed model is trained in a fully unsupervised manner with no manual annotations in the form of target boundaries nor phonetic transcriptions. We compare the proposed approach to several unsupervised baselines using both TIMIT and Buckeye corpora. Results suggest that our approach surpasses the baseline models and reaches state-of-the-art performance on both data sets. Furthermore, we experimented with expanding the training set with additional examples from the Librispeech corpus. We evaluated the resulting model on distributions and languages that were not seen during the training phase (English, Hebrew and German) and showed that utilizing additional untranscribed data is beneficial for model performance. Our implementation is available at: https://github.com/felixkreuk/UnsupSeg</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Piotr Żelasko|AUTHOR Piotr Żelasko]]^^1^^, [[Laureano Moro-Velázquez|AUTHOR Laureano Moro-Velázquez]]^^1^^, [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]^^2^^, [[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^3^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^University of Illinois at Urbana-Champaign, USA; ^^3^^Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3705–3709&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Only a handful of the world’s languages are abundant with the resources that enable practical applications of speech processing technologies. One of the methods to overcome this problem is to use the resources existing in other languages to train a multilingual automatic speech recognition (ASR) model, which, intuitively, should learn some universal phonetic representations. In this work, we focus on gaining a deeper understanding of how general these representations might be, and how individual phones are getting improved in a multilingual setting. To that end, we select a phonetically diverse set of languages, and perform a series of monolingual, multilingual and crosslingual (zero-shot) experiments. The ASR is trained to recognize the International Phonetic Alphabet (IPA) token sequences. We observe significant improvements across all languages in the multilingual setting, and stark degradation in the crosslingual setting, where the model, among other errors, considers Javanese as a tone language. Notably, as little as 10 hours of the target language training data tremendously reduces ASR error rates. Our analysis uncovered that even the phones that are unique to a single language can benefit greatly from adding training data from other languages — an encouraging result for the low-resource speech community.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Heikki Rasilo|AUTHOR Heikki Rasilo]], [[Yannick Jadoul|AUTHOR Yannick Jadoul]]
</p><p class="cpabstractcardaffiliationlist">Vrije Universiteit Brussel, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3715–3719&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In several areas of speech research, articulatory models able to produce a wide variety of speech sounds, not specific to any language, are needed as a starting point. Such research fields include the studies of sound system emergence in populations, infant speech acquisition research, and speech inversion research. Here we approach the problem of exploring the possible acoustic outcomes of a dynamic articulatory model efficiently, and provide an entropy based measure for the diversity of the explored articulations. Our exploration algorithm incrementally clusters produced babble into a number of target articulations, aiming to produce maximally interesting acoustic outcomes. Consonant gestures are defined as a subset of articulatory parameters and are thus superposed on vowel context, to provide a coarticulation effect. We show that the proposed algorithm explores the acoustic domain more efficiently than random target selection, and clusters the articulatory domain into a number of usable articulatory targets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]
</p><p class="cpabstractcardaffiliationlist">BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3720–3724&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic-to-articulatory inversion (AAI) methods estimate articulatory movements from the acoustic speech signal, which can be useful in several tasks such as speech recognition, synthesis, talking heads and language tutoring. Most earlier inversion studies are based on point-tracking articulatory techniques (e.g. EMA or XRMB). The advantage of rtMRI is that it provides dynamic information about the full midsagittal plane of the upper airway, with a high ‘relative’ spatial resolution. In this work, we estimated midsagittal rtMRI images of the vocal tract for speaker dependent AAI, using MGC-LSP spectral features as input. We applied FC-DNNs, CNNs and recurrent neural networks, and have shown that LSTMs are the most suitable for this task. As objective evaluation we measured normalized MSE, Structural Similarity Index (SSIM) and its complex wavelet version (CW-SSIM). The results indicate that the combination of FC-DNNs and LSTMs can achieve smooth generated MR images of the vocal tract, which are similar to the original MRI recordings (average CW-SSIM: 0.94).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Narjes Bozorg|AUTHOR Narjes Bozorg]], [[Michael T. Johnson|AUTHOR Michael T. Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Kentucky, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3725–3729&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel deep autoregressive method for Acoustic-to-Articulatory Inversion called Articulatory-WaveNet. In traditional methods such as Gaussian Mixture Model-Hidden Markov Model (GMM-HMM), mapping the frame-level interdependency of observations has not been considered. We address this problem by introducing the Articulatory-WaveNet with dilated causal convolutional layers to predict the articulatory trajectories from acoustic feature sequences. This new model has an average Root Mean Square Error (RMSE) of 1.08mm and a correlation of 0.82 on the English speaker subset of the ElectroMagnetic Articulography-Mandarin Accented English (EMA-MAE) corpus. Articulatory-WaveNet represents an improvement of 59% for RMSE and 30% for correlation over the previous GMM-HMM based inversion model. To the best of our knowledge, this paper introduces the first application of a WaveNet synthesis approach to the problem of Acoustic-to-Articulatory Inversion, and results are comparable to or better than the best currently published systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]]^^1^^, [[Ajinkya Kulkarni|AUTHOR Ajinkya Kulkarni]]^^1^^, [[Chrysanthi Dourou|AUTHOR Chrysanthi Dourou]]^^2^^, [[Yu Xie|AUTHOR Yu Xie]]^^3^^, [[Jacques Felblinger|AUTHOR Jacques Felblinger]]^^4^^, [[Karyna Isaieva|AUTHOR Karyna Isaieva]]^^4^^, [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]]^^4^^, [[Yves Laprie|AUTHOR Yves Laprie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Loria (UMR 7503), France; ^^2^^NTUA, Greece; ^^3^^Zhongnan Hospital of Wuhan University, China; ^^4^^IADI (Inserm U1254), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3730–3734&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we present an algorithm for synthesising pseudo rtMRI data of the vocal tract. rtMRI data on the midsagittal plane were used to synthesise target consonant-vowel (CV) using only a silence frame of the target speaker. For this purpose, several single speaker models were created. The input of the algorithm is a silence frame of both train and target speaker and the rtMRI data of the target CV. An image transformation is computed from each CV frame to the next one, creating a set of transformations that describe the dynamics of the CV production. Another image transformation is computed from the silence frame of train speaker to the silence frame of the target speaker and is used to adapt the set of transformations computed previously to the target speaker. The adapted set of transformations is applied to the silence of the target speaker to synthesise his/her CV pseudo rtMRI data. Synthesised images from multiple single speaker models are frame aligned and then averaged to create the final version of synthesised images. Synthesised images are compared with the original ones using image cross-correlation. Results show good agreement between the synthesised and the original images.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]^^1^^, [[Kele Xu|AUTHOR Kele Xu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BME, Hungary; ^^2^^NUDT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3735–3739&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In speech production research, different imaging modalities have been employed to obtain accurate information about the movement and shaping of the vocal tract. Ultrasound is an affordable and non-invasive imaging modality with relatively high temporal and spatial resolution to study the dynamic behavior of tongue during speech production. However, a long-standing problem for ultrasound tongue imaging is the transducer misalignment during longer data recording sessions. In this paper, we propose a simple, yet effective, misalignment quantification approach. The analysis employs MSE distance and two similarity measurement metrics to identify the relative displacement between the chin and the transducer. We visualize these measures as a function of the timestamp of the utterances. Extensive experiments are conducted on a Hungarian and Scottish English child dataset. The results suggest that large values of Mean Square Error (MSE) and small values of Structural Similarity Index (SSIM) and Complex Wavelet SSIM indicate corruptions or issues during the data recordings, which can either be caused by transducer misalignment or lack of gel.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maud Parrot|AUTHOR Maud Parrot]], [[Juliette Millet|AUTHOR Juliette Millet]], [[Ewan Dunbar|AUTHOR Ewan Dunbar]]
</p><p class="cpabstractcardaffiliationlist">LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3740–3744&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Reconstruction of articulatory trajectories from the acoustic speech signal has been proposed for improving speech recognition and text-to-speech synthesis. However, to be useful in these settings, articulatory reconstruction must be speaker-independent. Furthermore, as most research focuses on single, small data sets with few speakers, robust articulatory reconstruction could profit from combining data sets. Standard evaluation measures such as root mean squared error and Pearson correlation are inappropriate for evaluating the speaker-independence of models or the usefulness of combining data sets. We present a new evaluation for articulatory reconstruction which is independent of the articulatory data set used for training: the //phone discrimination// ABX task. We use the ABX measure to evaluate a bi-LSTM based model trained on three data sets (14 speakers), and show that it gives information complementary to standard measures, enabling us to evaluate the effects of data set merging, as well as the speaker independence of the model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lorenz Diener|AUTHOR Lorenz Diener]], [[Mehrdad Roustay Vishkasougheh|AUTHOR Mehrdad Roustay Vishkasougheh]], [[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3745–3749&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a new open access corpus for the training and evaluation of EMG-to-Speech conversion systems based on array electromyographic recordings. The corpus is recorded with a recording paradigm closely mirroring realistic EMG-to-Speech usage scenarios, and includes evaluation data recorded from both audible as well as silent speech. The corpus consists of 9.5 hours of data, split into 12 sessions recorded from 8 speakers. Based on this corpus, we present initial benchmark results with a realistic online EMG-to-Speech conversion use case, both for the audible and silent speech subsets. We also present a method for drastically improving EMG-to-Speech system stability and performance in the presence of time-related artifacts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joshua Penney|AUTHOR Joshua Penney]], [[Felicity Cox|AUTHOR Felicity Cox]], [[Anita Szakay|AUTHOR Anita Szakay]]
</p><p class="cpabstractcardaffiliationlist">Macquarie University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3750–3754&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Glottalisation of coda stops is a recent change in Australian English. Previous studies have shown that speakers use glottalisation to signal coda stop voicelessness in production, and that listeners interpret glottalisation as cueing coda stop voicelessness in perception. As is to be expected for a recent change, younger speakers glottalise more than older speakers, but in perception both age groups appear to use glottalisation similarly. This study examines whether links between the production and perception of glottalisation exist at the level of the individual. We determined how frequently individuals used glottalisation in production, and analysed this against how heavily the same individuals weighted glottalisation in perception. Although differences have previously been found at the age group level, at the level of the individual we found no correlation between how heavily listeners weighted glottalisation in perception and how frequently they used glottalisation in production for either the younger or the older listeners. Nevertheless, we did find a small number of individuals who exhibited an alignment of their production and perception repertoires, which may suggest that only a small proportion of individuals exhibit a strong production-perception link, and we propose that these individuals may be important for driving the progression of change.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dao Zhou|AUTHOR Dao Zhou]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^2^^, [[Yibo Wu|AUTHOR Yibo Wu]]^^1^^, [[Meng Liu|AUTHOR Meng Liu]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Jianguo Wei|AUTHOR Jianguo Wei]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3800–3804&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a dynamic-margin softmax loss for the training of deep speaker embedding neural network. Our proposal is inspired by the additive-margin softmax (AM-Softmax) loss reported earlier. In AM-Softmax loss, a constant margin is used for all training samples. However, the angle between the feature vector and the ground-truth class center is rarely the same for all samples. Furthermore, the angle also changes during training. Thus, it is more reasonable to set a dynamic margin for each training sample. In this paper, we propose to dynamically set the margin of each training sample commensurate with the cosine angle of that sample, hence, the name dynamic-additive-margin softmax (DAM-Softmax) loss. More specifically, the smaller the cosine angle is, the larger the margin between the training sample and the corresponding class in the feature space should be to promote intra-class compactness. Experimental results show that the proposed DAM-Softmax loss achieves state-of-the-art performance on the VoxCeleb dataset by 1.94% in equal error rate (EER). In addition, our method also outperforms AM-Softmax loss when evaluated on the Speakers in the Wild (SITW) corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Magdalena Rybicka|AUTHOR Magdalena Rybicka]], [[Konrad Kowalczyk|AUTHOR Konrad Kowalczyk]]
</p><p class="cpabstractcardaffiliationlist">AGH UST, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3805–3809&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In various classification tasks the major challenge is in generating discriminative representation of classes. By proper selection of deep neural network (DNN) loss function we can encourage it to produce embeddings with increased inter-class separation and smaller intra-class distances. In this paper, we develop softmax-based cross-entropy loss function which adapts its parameters to the current training phase. The proposed solution improves accuracy up to 24% in terms of Equal Error Rate (EER) and minimum Detection Cost Function (minDCF). In addition, our proposal also accelerates network convergence compared with other state-of-the-art softmax-based losses. As an additional contribution of this paper, we adopt and subsequently modify the ResNet DNN structure for the speaker recognition task. The proposed ResNet network achieves relative gains of up to 32% and 15% in terms of EER and minDCF respectively, compared with the well-established Time Delay Neural Network (TDNN) architecture for x-vector extraction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Victoria Mingote|AUTHOR Victoria Mingote]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3810–3814&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a new approach for the enrollment process in a deep neural network (DNN) system which learns the speaker model by an optimization process. Most Speaker Verification (SV) systems extract representations for both the enrollment and test utterances called embeddings, and then, these systems usually apply a similarity metric or complex back-ends to carry out the verification process. Unlike previous works, we propose to take advantage of the knowledge acquired by a DNN to model the speakers from the training set since the last layer of the DNN can be seen as an embedding dictionary which represents train speakers. Thus, after the initial training phase, we introduce a new learnable vector for each enrollment speaker. Furthermore, to lead this training process, we employ a loss function more appropriate for verification, the approximated Detection Cost Function (//aDCF//) loss function. The new strategy to produce enrollment models for each target speaker was tested on the RSR-Part II database for text-dependent speaker verification, where the proposed approach outperforms the reference system based on directly averaging of the embeddings extracted from the enroll data using the network and the application of cosine similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seyyed Saeed Sarfjoo|AUTHOR Seyyed Saeed Sarfjoo]], [[Srikanth Madikeri|AUTHOR Srikanth Madikeri]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Sébastien Marcel|AUTHOR Sébastien Marcel]]
</p><p class="cpabstractcardaffiliationlist">Idiap Research Institute, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3815–3819&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To adapt the speaker verification (SV) system to a target domain with limited data, this paper investigates the transfer learning of the model pre-trained on the source domain data. To that end, layer-by-layer adaptation with transfer learning from the initial and final layers of the pre-trained model is investigated. We show that the model adapted from the initial layers outperforms the model adapted from the final layers. Based on this evidence, and inspired by the works in image recognition field, we hypothesize that low-level convolutional neural network (CNN) layers characterize domain-specific component while high-level CNN layers are domain-independent and have more discriminative power. For adapting these domain-specific components, angular margin softmax (AMSoftmax) applied on the CNN-based implementation of the x-vector architecture. In addition, to reduce the problem of over-fitting on the limited target data, transfer learning on the batch norm layers is investigated. Mean shift and covariance estimation of batch norm allows to map the represented components of the target domain to the source domain. Using TDNN and E-TDNN versions of the x-vectors as baseline models, the adapted models on the development set of NIST SRE 2018 outperformed the baselines with relative improvements of 11.0 and 13.8%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuheng Wei|AUTHOR Yuheng Wei]], [[Junzhao Du|AUTHOR Junzhao Du]], [[Hui Liu|AUTHOR Hui Liu]]
</p><p class="cpabstractcardaffiliationlist">Xidian University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3820–3824&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition for unseen speakers out of the training dataset relies on the discrimination of speaker embedding. Recent studies use the angular softmax losses with angular margin penalties to enhance the intra-class compactness of speaker embedding, which achieve obvious performance improvement. However, the classification layer encounters the problem of dimension explosion in these losses with the growth of training speakers. In this paper, like the prototype network loss in the few-short learning and the generalized end-to-end loss, we optimize the cosine distances between speaker embeddings and their corresponding centroids rather than the weight vectors in the classification layer. For the intra-class compactness, we impose the additive angular margin to shorten the cosine distance between speaker embeddings belonging to the same speaker. Meanwhile, we also explicitly improve the inter-class separability by enlarging the cosine distance between different speaker centroids. Experiments show that our loss achieves comparable performance with the stat-of-the-art angular margin softmax loss in both verification and identification tasks and markedly reduces the training iterations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiawen Kang|AUTHOR Jiawen Kang]], [[Ruiqi Liu|AUTHOR Ruiqi Liu]], [[Lantian Li|AUTHOR Lantian Li]], [[Yunqi Cai|AUTHOR Yunqi Cai]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3825–3829&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Domain generalization remains a critical problem for speaker recognition, even with the state-of-the-art architectures based on deep neural nets. For example, a model trained on reading speech may largely fail when applied to scenarios of singing or movie. In this paper, we propose a domain-invariant projection to improve the generalizability of speaker vectors. This projection is a simple neural net and is trained following the Model-Agnostic Meta-Learning (MAML) principle, for which the objective is to classify speakers in one domain if it had been updated with speech data in another domain. We tested the proposed method on CNCeleb, a new dataset consisting of single-speaker multi-condition (SSMC) data. The results demonstrated that the MAML-based domain-invariant projection can produce more generalizable speaker vectors, and effectively improve the performance in unseen domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brecht Desplanques|AUTHOR Brecht Desplanques]], [[Jenthe Thienpondt|AUTHOR Jenthe Thienpondt]], [[Kris Demuynck|AUTHOR Kris Demuynck]]
</p><p class="cpabstractcardaffiliationlist">Ghent University, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3830–3834&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current speaker verification techniques rely on a neural network to extract speaker representations. The successful x-vector architecture is a Time Delay Neural Network (TDNN) that applies statistics pooling to project variable-length utterances into fixed-length speaker characterizing embeddings. In this paper, we propose multiple enhancements to this architecture based on recent trends in the related fields of face verification and computer vision. Firstly, the initial frame layers can be restructured into 1-dimensional Res2Net modules with impactful skip connections. Similarly to SE-ResNet, we introduce Squeeze-and-Excitation blocks in these modules to explicitly model channel interdependencies. The SE block expands the temporal context of the frame layer by rescaling the channels according to global properties of the recording. Secondly, neural networks are known to learn hierarchical features, with each layer operating on a different level of complexity. To leverage this complementary information, we aggregate and propagate features of different hierarchical levels. Finally, we improve the statistics pooling module with channel-dependent frame attention. This enables the network to focus on different subsets of frames during each of the channel’s statistics estimation. The proposed ECAPA-TDNN architecture significantly outperforms state-of-the-art TDNN based systems on the VoxCeleb test sets and the 2019 VoxCeleb Speaker Recognition Challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenda Chen|AUTHOR Wenda Chen]]^^1^^, [[Jonathan Huang|AUTHOR Jonathan Huang]]^^2^^, [[Tobias Bocklet|AUTHOR Tobias Bocklet]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Intel, USA; ^^2^^Apple, USA; ^^3^^Intel, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3835–3839&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition performance has been greatly improved with the emergence of deep learning. Deep neural networks show the capacity to effectively deal with impacts of noise and reverberation, making them attractive to far-field speaker recognition systems. The x-vector framework is a popular choice for generating speaker embeddings in recent literature due to its robust training mechanism and excellent performance in various test sets. In this paper, we start with early work on including invariant representation learning (IRL) to the loss function and modify the approach with centroid alignment (CA) and length variability cost (LVC) techniques to further improve robustness in noisy, far-field applications. This work mainly focuses on improvements for short-duration test utterances (1-8s). We also present improved results on long-duration tasks. In addition, this work discusses a novel self-attention mechanism. On the VOiCES far-field corpus, the combination of the proposed techniques achieves relative improvements of 7.0% for extremely short and 8.2% for full-duration test utterances on equal error rate (EER) over our baseline system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiting Lu|AUTHOR Yiting Lu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Yu Wang|AUTHOR Yu Wang]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3840–3844&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language ‘grammatical error correction’ (GEC) is an important mechanism to help learners of a foreign language, here English, improve their spoken grammar. GEC is challenging for non-native spoken language due to interruptions from disfluent speech events such as repetitions and false starts and issues in strictly defining what is acceptable in spoken language. Furthermore there is little labelled data to train models. One way to mitigate the impact of speech events is to use a disfluency detection (DD) model. Removing the detected disfluencies converts the speech transcript to be closer to written language, which has significantly more labelled training data. This paper considers two types of approaches to leveraging DD models to boost spoken GEC performance. One is sequential, a separately trained DD model acts as a pre-processing module providing a more structured input to the GEC model. The second approach is to train DD and GEC models in an end-to-end fashion, simultaneously optimising both modules. Embeddings enable end-to-end models to have a richer information flow. Experimental results show that DD effectively regulates GEC input; end-to-end training works well when fine-tuned on limited labelled in-domain data; and improving DD by incorporating acoustic information helps improve spoken GEC.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sara Papi|AUTHOR Sara Papi]]^^1^^, [[Edmondo Trentin|AUTHOR Edmondo Trentin]]^^1^^, [[Roberto Gretter|AUTHOR Roberto Gretter]]^^2^^, [[Marco Matassoni|AUTHOR Marco Matassoni]]^^2^^, [[Daniele Falavigna|AUTHOR Daniele Falavigna]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Siena, Italy; ^^2^^FBK, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3845–3849&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The paper copes with the task of automatic assessment of second language proficiency from the language learners’ spoken responses to test prompts. The task has significant relevance to the field of computer assisted language learning. The approach presented in the paper relies on two separate modules: (1) an automatic speech recognition system that yields text transcripts of the spoken interactions involved, and (2) a multiple classifier system based on deep learners that ranks the transcripts into proficiency classes. Different deep neural network architectures (both feed-forward and recurrent) are specialized over diverse representations of the texts in terms of: a reference grammar, the outcome of probabilistic language models, several word embeddings, and two bag-of-word models. Combination of the individual classifiers is realized either via a probabilistic pseudo-joint model, or via a neural mixture of experts. Using the data of the third Spoken CALL Shared Task challenge, the highest values to date were obtained in terms of three popular evaluation metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinhao Wang|AUTHOR Xinhao Wang]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Christopher Hamill|AUTHOR Christopher Hamill]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3850–3854&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study aims to develop automatic models to provide accurate and actionable diagnostic feedback within the context of spoken language learning and assessment, in particular, targeting the content development skill. We focus on one type of test question widely used in speaking assessment where test takers are required to first listen to and/or read stimulus material and then create a spontaneous response to a question related to the stimulus. In a high-proficiency response, critical content from the source material — referred to as “key points” — should be properly covered. We propose Transformer-based models to automatically detect absent key points or location spans of key points present in a response. Furthermore, we introduce a multi-task learning approach to measure how well a key point is rendered within a response (quality score). Experimental results show that automatic models can surpass human expert performance on both tasks: for span detection, the system performance reached an F1 score of 74.5% (vs. human agreement of 68.3%); for quality score prediction, system performance reached a Pearson correlation coefficient (r) of 0.744 (vs. human agreement of 0.712). Finally, the proposed key point-based features can be used to predict speaking proficiency scores with a correlation of 0.730.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vyas Raina|AUTHOR Vyas Raina]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Kate M. Knill|AUTHOR Kate M. Knill]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3855–3859&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>There is an increasing demand for automated spoken language assessment (SLA) systems, partly driven by the performance improvements that have come from deep learning based approaches. One aspect of deep learning systems is that they do not require expert derived features, operating directly on the original signal such as a speech recognition (ASR) transcript. This, however, increases their potential susceptibility to adversarial attacks as a form of candidate malpractice. In this paper the sensitivity of SLA systems to a universal black-box attack on the ASR text output is explored. The aim is to obtain a single, universal phrase to maximally increase any candidate’s score. Four approaches to detect such adversarial attacks are also described. All the systems, and associated detection approaches, are evaluated on a free (spontaneous) speaking section from a Business English test. It is shown that on deep learning based SLA systems the average candidate score can be increased by almost one grade level using a single six word phrase appended to the end of the response hypothesis. Although these large gains can be obtained, they can be easily detected based on detection shifts from the scores of a “traditional” Gaussian Process based grader.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xixin Wu|AUTHOR Xixin Wu]]^^1^^, [[Kate M. Knill|AUTHOR Kate M. Knill]]^^2^^, [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]^^2^^, [[Andrey Malinin|AUTHOR Andrey Malinin]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Cambridge, UK; ^^2^^University of Cambridge, UK; ^^3^^Yandex, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3860–3864&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep learning has dramatically improved the performance of automated systems on a range of tasks including spoken language assessment. One of the issues with these deep learning approaches is that they tend to be overconfident in the decisions that they make, with potentially serious implications for deployment of systems for high-stakes examinations. This paper examines the use of ensemble approaches to improve both the reliability of the scores that are generated, and the ability to detect where the system has made predictions beyond acceptable errors. In this work assessment is treated as a regression problem. Deep density networks, and ensembles of these models, are used as the predictive models. Given an ensemble of models measures of uncertainty, for example the variance of the predicted distributions, can be obtained and used for detecting outlier predictions. However, these ensemble approaches increase the computational and memory requirements of the system. To address this problem the ensemble is distilled into a single mixture density network. The performance of the systems is evaluated on a free speaking prompt-response style spoken language assessment test. Experiments show that the ensembles and the distilled model yield performance gains over a single model, and have the ability to detect outliers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenchao Lin|AUTHOR Zhenchao Lin]]^^1^^, [[Ryo Takashima|AUTHOR Ryo Takashima]]^^1^^, [[Daisuke Saito|AUTHOR Daisuke Saito]]^^1^^, [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]]^^1^^, [[Noriko Nakanishi|AUTHOR Noriko Nakanishi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tokyo, Japan; ^^2^^Kobe Gakuin University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3865–3869&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Language teachers often claim that the goal of speech training should be intelligible enough pronunciations, not native-sounding ones, because some types of accented pronunciations are intelligible or comprehensible enough. However, if one aims to provide a technical framework of automatic assessment based on intelligibility or comprehensibility, s/he has to be faced with a big technical challenge. That is collection of L2 utterances with annotations based on these metrics. Further, learners always want to know which parts (words, morphemes, or syllables) in their speech should be corrected. This means that data collection needs a valid method of intelligibility annotation with fine granularity. In our previous studies, a new metric of //shadowability// was introduced, and it was shown experimentally to be highly correlated to perceived intelligibility or comprehensibility as well as it was explained theoretically to be potential to give annotations with fine granularity. In this paper, shadowability annotation with fine granularity is examined experimentally, and a new and more valid method of collecting shadowing utterances is introduced. Finally, we tentatively derive frame-based shadowability annotation for L2 utterances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu Bai|AUTHOR Yu Bai]], [[Ferdy Hubers|AUTHOR Ferdy Hubers]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[Helmer Strik|AUTHOR Helmer Strik]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3870–3874&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2020/MEDIA/2842" class="externallinkbutton" target="_blank">{{$:/causal/ZIP Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Learning to read is a prerequisite to participate in our knowledge society. Developing reading skills requires intensive practice with individual evaluation and guidance by teachers, which is not always feasible in traditional classroom instruction. Automatic Speech Recognition (ASR) technology could offer a solution, but so far it has been mostly used to follow children while reading and to provide correct word forms through text-to-speech technology. However, ASR could possibly be employed at earlier stages of learning to read when children are still in the process of developing decoding skills. Early evaluation through ASR and individualized feedback could help achieve more personalized and possibly more effective guidance, thus preventing reading problems and improving the process of reading development. 

In this paper we report on an explorative study in which an ASR-based system equipped with logging capabilities was developed and employed to evaluate decoding skills in Dutch first graders reading aloud, and to provide them with detailed, individualized feedback. The results indicate that ASR-based feedback leads to improved reading accuracy and speed and that the log-files provide useful information to enhance practice and feedback, thus paving the way for more personalized, technology-enriched approaches to reading instruction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dominika Woszczyk|AUTHOR Dominika Woszczyk]]^^1^^, [[Stavros Petridis|AUTHOR Stavros Petridis]]^^1^^, [[David Millard|AUTHOR David Millard]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Imperial College London, UK; ^^2^^University of Southampton, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3875–3879&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition systems have improved dramatically over the last few years, however, their performance is significantly degraded for the cases of accented or impaired speech. This work explores domain adversarial neural networks (DANN) for speaker-independent speech recognition on the UAS dataset of dysarthric speech. The classification task on 10 spoken digits is performed using an end-to-end CNN taking raw audio as input. The results are compared to a speaker-adaptive (SA) model as well as speaker-dependent (SD) and multi-task learning models (MTL). The experiments conducted in this paper show that DANN achieves an absolute recognition rate of 74.91% and outperforms the baseline by 12.18%. Additionally, the DANN model achieves comparable results to the SA model’s recognition rate of 77.65%. We also observe that when labelled dysarthric speech data is available DANN and MTL perform similarly, but when they are not DANN performs better than MTL.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shunsuke Hidaka|AUTHOR Shunsuke Hidaka]], [[Yogaku Lee|AUTHOR Yogaku Lee]], [[Kohei Wakamiya|AUTHOR Kohei Wakamiya]], [[Takashi Nakagawa|AUTHOR Takashi Nakagawa]], [[Tokihiko Kaburagi|AUTHOR Tokihiko Kaburagi]]
</p><p class="cpabstractcardaffiliationlist">Kyushu University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3880–3884&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Perceptual evaluation of voice quality is widely used in laryngological practice, but it lacks reproducibility caused by inter- and intra-rater variability. This problem can be solved by automatic estimation of voice quality using machine learning. In the previous studies, conventional acoustic features, such as jitter, have often been employed as inputs. However, many of them are vulnerable to severe hoarseness because they assume a quasi-periodicity of voice. This paper investigated non-parametric features derived from amplitude and phase spectrograms. We applied the instantaneous phase correction proposed by Yatabe et al. (2018) to extract features that could be interpreted as indicators of non-sinusoidality. Specifically, we compared log amplitude, temporal phase variation, temporal complex value variation, and mel-scale versions of them. A deep neural network with a bidirectional GRU was constructed for each item of GRBAS Scale, a hoarseness evaluation method. The dataset was composed of 2545 samples of sustained vowel /a/ with the GRBAS scores labeled by an otolaryngologist. The results showed that the Hz-mel conversion improved the performance in almost all the case. The best scores were obtained when using temporal phase variation along the mel scale for Grade, Rough, Breathy, and Strained, and when using log mel amplitude for Asthenic.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Po-Chien Hsu|AUTHOR Po-Chien Hsu]]
</p><p class="cpabstractcardaffiliationlist">National Chiao Tung University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3885–3889&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Traditionally, task-oriented dialogue system is built by an autonomous agent which can be trained by reinforcement learning where the reward from environment is maximized. The agent is learned by updating the policy when the goal state is observed. However, in real world, the extrinsic reward is usually sparse or missing. The training efficiency is bounded. The system performance is degraded. It is challenging to tackle the issue of sample efficiency in sparse reward scenario for spoken dialogues. Accordingly, a dialogue agent needs additional information to update its policy even in the period when reward is absent in the environment. This paper presents a new dialogue agent which is learned by incorporating the intrinsic reward based on the information-theoretic approach via stochastic curiosity exploration. This agent encourages the exploration for future diversity based on a latent dynamic architecture which consists of encoder network, curiosity network, information network and policy network. The latent states and actions are drawn to predict stochastic transition for future. The curiosity learning are implemented with intrinsic reward in a metric of mutual information and prediction error in the predicted states and actions. Experiments on dialogue management using PyDial demonstrate the benefit by using the stochastic curiosity exploration.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Myeongho Jeong|AUTHOR Myeongho Jeong]], [[Seungtaek Choi|AUTHOR Seungtaek Choi]], [[Hojae Han|AUTHOR Hojae Han]], [[Kyungho Kim|AUTHOR Kyungho Kim]], [[Seung-won Hwang|AUTHOR Seung-won Hwang]]
</p><p class="cpabstractcardaffiliationlist">Yonsei University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3890–3894&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper studies dialogue response selection task. As state-of-the-arts are neural models requiring a large training set, data augmentation is essential to overcome the sparsity of observational annotation, where one observed response is annotated as gold. In this paper, we propose counterfactual augmentation, of considering whether unobserved utterances would “counterfactually” replace the labelled response, for the given context, and augment only if that is the case. We empirically show that our pipeline improves BERT-based models in two different response selection tasks without incurring annotation overheads.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongyin Luo|AUTHOR Hongyin Luo]]^^1^^, [[Shang-Wen Li|AUTHOR Shang-Wen Li]]^^2^^, [[James Glass|AUTHOR James Glass]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3895–3899&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken dialog systems have seen applications in many domains, including medical for automatic conversational diagnosis. State-of-the-art dialog managers are usually driven by deep reinforcement learning models, such as deep Q networks (DQNs), which learn by interacting with a simulator to explore the entire action space since real conversations are limited. However, the DQN-based automatic diagnosis models do not achieve satisfying performances when adapted to new, unseen diseases with only a few training samples. In this work, we propose the Prototypical Q Networks (ProtoQN) as the dialog manager for the automatic diagnosis systems. The model calculates prototype embeddings with real conversations between doctors and patients, learning from them and simulator-augmented dialogs more efficiently. We create both supervised and few-shot learning tasks with the Muzhi corpus. Experiments showed that the ProtoQN significantly outperformed the baseline DQN model in both supervised and few-shot learning scenarios, and achieves state-of-the-art few-shot learning performances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Teakgyu Hong|AUTHOR Teakgyu Hong]]^^1^^, [[Oh-Woog Kwon|AUTHOR Oh-Woog Kwon]]^^2^^, [[Young-Kil Kim|AUTHOR Young-Kil Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^ETRI, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3900–3904&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To overcome the limitations of conventional pipeline-based task-oriented dialog systems, an end-to-end approach has been introduced. To date, many end-to-end task-oriented dialog systems have been proposed and these have shown good performance in various domains. However, those have some limitations such as the need for dialog state annotations. And there is also room for improvement for those systems. In this paper, we examine the issues of recent end-to-end task-oriented dialog systems and present a model that can handle these issues. The proposed model classifies a system utterance template in a retrieval-based manner and then generates the slot values in the template through a decoder. Also, we propose an unsupervised learning based template generation method that allows model training even in a domain where the templates are not given and the dialog information is not tagged. Our model obtains new state-of-the-art results on a restaurant search domain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenhao He|AUTHOR Zhenhao He]], [[Jiachun Wang|AUTHOR Jiachun Wang]], [[Jian Chen|AUTHOR Jian Chen]]
</p><p class="cpabstractcardaffiliationlist">SCUT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3905–3909&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in neural sequence-to-sequence models have led to promising results for end-to-end task-oriented dialog generation. Such frameworks enable a decoder to retrieve knowledge from the dialog history and the knowledge base during generation. However, these models usually rely on learned word embeddings as entity representation, which is difficult to deal with the rare and unknown entities. In this work, we propose a novel enhanced entity representation (EER) to simultaneously obtain context-sensitive and structure-aware entity representation. Our proposed method enables the decoder to facilitate both the ability to fetch the relevant knowledge and the effectiveness of incorporating grounding knowledge into the dialog generation. Experimental results on two publicly available dialog datasets show that our model outperforms the state-of-the-art data-driven task-oriented dialog models. Moreover, we conduct an Out-of-Vocabulary (OOV) test to demonstrate the superiority of EER in handling common OOV problem.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Viet-Trung Dang|AUTHOR Viet-Trung Dang]], [[Tianyu Zhao|AUTHOR Tianyu Zhao]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3910–3914&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding, which extracts intents and/or semantic concepts in utterances, is conventionally formulated as a post-processing of automatic speech recognition. It is usually trained with oracle transcripts, but needs to deal with errors by ASR. Moreover, there are acoustic features which are related with intents but not represented with the transcripts. In this paper, we present an end-to-end model that directly converts speech into dialog acts without the deterministic transcription process. In the proposed model, the dialog act recognition network is conjunct with an acoustic-to-word ASR model at its latent layer before the softmax layer, which provides a distributed representation of word-level ASR decoding information. Then, the entire network is fine-tuned in an end-to-end manner. This allows for stable training as well as robustness against ASR errors. The model is further extended to conduct DA segmentation jointly. Evaluations with the Switchboard corpus demonstrate that the proposed method significantly improves dialog act recognition accuracy from the conventional pipeline framework.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yao Qian|AUTHOR Yao Qian]], [[Yu Shi|AUTHOR Yu Shi]], [[Michael Zeng|AUTHOR Michael Zeng]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3915–3919&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding (SLU) tries to decode an input speech utterance such that effective semantic actions can be taken to continue meaningful and interactive spoken dialog (SD). The performance of SLU, however, can be adversely affected by automatic speech recognition (ASR) errors. In this paper, we exploit transfer learning in a Generative pre-trained Transformer (GPT) to jointly optimize ASR error correction and semantic labeling in terms of dialog act and slot-value for a given user’s spoken response in the context of SD system (SDS). With the encoded ASR output and dialog history as context, a conditional generative model is trained to generate transcripts correction, dialog act, and slot-values successively. The proposed generation model is jointly optimized as a classification task, which utilizes the ground-truth and N-best hypotheses in a multi-task, discriminative learning. We evaluate its effectiveness on a public SD corpus used in the Second Dialog State Tracking Challenge. The results show that our generation model can achieve a relative word error rate reduction of 25.12% from that in the original ASR 1-best result, and a sentence error rate (SER) lower than the oracle result from the 10-best ASR hypotheses. The proposed approach of generating dialog acts and slot-values, instead of classification and tagging, is promising. The refined ASR hypotheses are critical for improving semantic label generation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinnuo Xu|AUTHOR Xinnuo Xu]]^^1^^, [[Yizhe Zhang|AUTHOR Yizhe Zhang]]^^2^^, [[Lars Liden|AUTHOR Lars Liden]]^^2^^, [[Sungjin Lee|AUTHOR Sungjin Lee]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Heriot-Watt University, UK; ^^2^^Microsoft, USA; ^^3^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3920–3924&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although the data-driven approaches of some recent bot building platforms make it possible for a wide range of users to easily create dialogue systems, those platforms don’t offer tools for quickly identifying which log dialogues contain problems. Thus, in this paper, we (1) introduce a new task, log dialogue ranking, where the ranker places problematic dialogues higher (2) provide a collection of human-bot conversations in the restaurant inquiry task labelled with dialogue quality for ranker training and evaluation (3) present a detailed description of the data collection pipeline, which is entirely based on crowd-sourcing (4) finally report a benchmark result of dialogue ranking, which shows the usability of the data and sets a baseline for future studies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavlos Papadopoulos|AUTHOR Pavlos Papadopoulos]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4029–4033&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement under unseen noise conditions is a challenging task, but essential for meeting the increasing demand for speech technologies to operate in diverse and dynamic real world environments. A method that has been widely used to enhance speech signals is nonnegative matrix factorization (NMF). In the training phase NMF produces speech and noise dictionaries which are represented as matrices with nonnegative entries. The quality of the enhanced signal depends on the reconstruction ability of the dictionaries. A geometric interpretation of these nonnegative matrices enables us to cast them as convex polyhedral cones in the positive orthant. In this work, we employ conic affinity measures to design systems able to operate in unseen noise conditions, by selecting an appropriate noise dictionary amongst a pool of potential candidates. We show that such a method yields results similar to those that would be produced if the oracle noise dictionary was used.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shichao Hu|AUTHOR Shichao Hu]], [[Bin Zhang|AUTHOR Bin Zhang]], [[Beici Liang|AUTHOR Beici Liang]], [[Ethan Zhao|AUTHOR Ethan Zhao]], [[Simon Lui|AUTHOR Simon Lui]]
</p><p class="cpabstractcardaffiliationlist">Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4074–4078&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio super-resolution is a challenging task of recovering the missing high-resolution features from a low-resolution signal. To address this, generative adversarial networks (GAN) have been used to achieve promising results by training the mappings between magnitudes of the low and high-frequency components. However, phase information is not well-considered for waveform reconstruction in conventional methods. In this paper, we tackle the problem of music super-resolution and conduct a thorough investigation on the importance of phase for this task. We use GAN to predict the magnitudes of the high-frequency components. The corresponding phase information can be extracted using either a GAN-based waveform synthesis system or a modified Griffin-Lim algorithm. Experimental results show that phase information plays an important role in the improvement of the reconstructed music quality. Moreover, our proposed method significantly outperforms other state-of-the-art methods in terms of objective evaluations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yunyun Ji|AUTHOR Yunyun Ji]]^^1^^, [[Longting Xu|AUTHOR Longting Xu]]^^2^^, [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Agora, China; ^^2^^Donghua University, China; ^^3^^Concordia University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4034–4038&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose an adversarial dictionary learning method to train a speaker independent speech dictionary and a universal noise dictionary for improving the generality of the dictionary learning based speech enhancement system. In the learning stage, two discriminators are employed separately to identify the components in speech and noise which are highly correlated with each other. The residuals in the speech and noise magnitude spectral matrices are then utilized to train the speech and noise dictionaries via the alternating direction method of multiplier algorithm, which can effectively reduce the mutual coherence between speech and noise. In the enhancement stage, a new optimization technique is proposed for enhancing the speech based on the low-rank decomposition and sparse coding. Experimental results show that our proposed method achieves better performance in improving the speech quality and intelligibility than the reference methods in terms of three objective performance evaluation measures.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shogo Seki|AUTHOR Shogo Seki]], [[Moe Takada|AUTHOR Moe Takada]], [[Tomoki Toda|AUTHOR Tomoki Toda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4039–4043&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a semi-supervised method for enhancing and suppressing self-produced speech, using a variational autoencoder (VAE) to jointly model self-produced speech recorded with air- and body-conductive microphones. In speech enhancement and suppression for self-produced speech, body-conducted signals can be used as an acoustical clue since they are robust against external noise and include self-produced speech predominantly. We have previously developed a semi-supervised method taking an improved source modeling approach called the joint source modeling, which can capture a nonlinear correspondence of air- and body-conducted signals using non-negative matrix factorization (NMF). This allows enhanced and suppressed air-conducted self-produced speech to be prevented from contaminating by the characteristics of body-conducted signals. However, our previous method employs a rank-1 spatial model, which is effective but difficult to consider in more practical situations. Furthermore, joint source modeling depends on the representation capability of NMF. As a result, enhancement and suppression performances are limited. To overcome these limitations, this paper employs a full-rank spatial model and proposes a joint source modeling of air- and body-conducted signals using a VAE, which has shown to represent source signals more accurately than NMF. Experimental results revealed that the proposed method outperformed baseline methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ran Weisman|AUTHOR Ran Weisman]]^^1^^, [[Vladimir Tourbabin|AUTHOR Vladimir Tourbabin]]^^2^^, [[Paul Calamia|AUTHOR Paul Calamia]]^^2^^, [[Boaz Rafaely|AUTHOR Boaz Rafaely]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BGU, Israel; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4044–4048&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A wide range of applications in speech and audio signal processing incorporate a model of room reverberation based on the spatial covariance matrix (SCM). Typically, a diffuse sound field model is used, but although the diffuse model simplifies formulations, it may lead to limited accuracy in realistic sound fields, resulting in potential degradation in performance. While some extensions to the diffuse field SCM recently have been presented, accurate modeling for real sound fields remains an open problem. In this paper, a method for estimating the SCM of reverberant speech is proposed, based on the selection of time-frequency bins dominated by reverberation. The method is data-based and estimates the SCM for a specific acoustic scene. It is therefore applicable to realistic reverberant fields. An application of the proposed method to optimal beamforming for speech enhancement is presented, using the plane wave density function in the spherical harmonics (SH) domain. It is shown that the use of the proposed SCM outperforms the commonly used diffuse field SCM, suggesting the method is more successful in capturing the statistics of the late part of the reverberation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Minh Tri Ho|AUTHOR Minh Tri Ho]]^^1^^, [[Jinyoung Lee|AUTHOR Jinyoung Lee]]^^1^^, [[Bong-Ki Lee|AUTHOR Bong-Ki Lee]]^^2^^, [[Dong Hoon Yi|AUTHOR Dong Hoon Yi]]^^2^^, [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yonsei University, Korea; ^^2^^LG Electronics, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4049–4053&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel architecture for multi-channel speech enhancement using a cross-channel attention-based Wave-U-Net structure. Despite the advantages of utilizing spatial information as well as spectral information, it is challenging to effectively train a multi-channel deep learning system in an end-to-end framework. With a channel-independent encoding architecture for spectral estimation and a strategy to extract spatial information through an inter-channel attention mechanism, we implement a multi-channel speech enhancement system that has high performance even in reverberant and extremely noisy environments. Experimental results show that the proposed architecture has superior performance in terms of signal-to-distortion ratio improvement (SDRi), short-time objective intelligence (STOI), and phoneme error rate (PER) for speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Igor Fedorov|AUTHOR Igor Fedorov]]^^1^^, [[Marko Stamenovic|AUTHOR Marko Stamenovic]]^^2^^, [[Carl Jensen|AUTHOR Carl Jensen]]^^2^^, [[Li-Chia Yang|AUTHOR Li-Chia Yang]]^^2^^, [[Ari Mandell|AUTHOR Ari Mandell]]^^2^^, [[Yiming Gan|AUTHOR Yiming Gan]]^^3^^, [[Matthew Mattina|AUTHOR Matthew Mattina]]^^1^^, [[Paul N. Whatmough|AUTHOR Paul N. Whatmough]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Arm, USA; ^^2^^Bose, USA; ^^3^^University of Rochester, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4054–4058&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern speech enhancement algorithms achieve remarkable noise suppression by means of large recurrent neural networks (RNNs). However, large RNNs limit practical deployment in hearing aid hardware (HW) form-factors, which are battery powered and run on resource-constrained microcontroller units (MCUs) with limited memory capacity and compute capability. In this work, we use model compression techniques to bridge this gap. We define the constraints imposed on the RNN by the HW and describe a method to satisfy them. Although model compression techniques are an active area of research, we are the first to demonstrate their efficacy for RNN speech enhancement, using pruning and integer quantization of weights/activations. We also demonstrate state update skipping, which reduces the computational load. Finally, we conduct a perceptual evaluation of the compressed models to verify audio quality on human raters. Results show a reduction in model size and operations of 11.9× and 2.9×, respectively, over the baseline for compressed models, without a statistical difference in listening preference and only exhibiting a loss of 0.55dB SDR. Our model achieves a computational latency of 2.39ms, well within the 10ms target and 351× better than previous work.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shu Hikosaka|AUTHOR Shu Hikosaka]]^^1^^, [[Shogo Seki|AUTHOR Shogo Seki]]^^1^^, [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]]^^1^^, [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]]^^1^^, [[Kazuya Takeda|AUTHOR Kazuya Takeda]]^^1^^, [[Hideki Banno|AUTHOR Hideki Banno]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya University, Japan; ^^2^^Meijo University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4059–4063&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a speech waveform modification method which incorporates a hearing impairment simulator, to improve speech intelligibility for the hearing-impaired. The settings of hearing aid devices usually need to be manually adjusted to suit the needs of each user, which creates a significant burden. To address this issue, the proposed method creates a spectral shaping filter, using a hearing impairment simulator capable of estimating speech signals as perceived by a specific hearing-impaired person. We conduct objective and subjective evaluations through simulations using the hearing impairment simulator. Our experimental results demonstrate that; 1) the proposed spectral shaping filter can significantly improve both speech intelligibility and quality, 2) the filter can be combined with a well-known speech intelligibility enhancement technique based on power compensation using dynamic range compression (DRC), and 3) speech intelligibility can be further improved by controlling the trade-off between filtering and DRC-based power compensation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nana Hou|AUTHOR Nana Hou]]^^1^^, [[Chenglin Xu|AUTHOR Chenglin Xu]]^^1^^, [[Van Tung Pham|AUTHOR Van Tung Pham]]^^2^^, [[Joey Tianyi Zhou|AUTHOR Joey Tianyi Zhou]]^^3^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^NTU, Singapore; ^^3^^A*STAR, Singapore; ^^4^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4064–4068&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech bandwidth extension aims to generate a wideband signal from a narrowband (low-band) input by predicting the missing high-frequency components. It is believed that the general knowledge about the speaker and phonetic content strengthens the prediction. In this paper, we propose to augment the low-band acoustic features with i-vector and phonetic posteriorgram (PPG), which represent speaker and phonetic content of the speech, respectively. We also propose a residual dual-path network (RDPN) as the core module to process the augmented features, which fully utilizes the utterance-level temporal continuity information and avoids gradient vanishing. Experiments show that the proposed method achieves 20.2% and 7.0% relative improvements over the best baseline in terms of log-spectral distortion (LSD) and signal-to-noise ratio (SNR), respectively. Furthermore, our method is 16 times more compact than the best baseline in terms of the number of parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nana Hou|AUTHOR Nana Hou]]^^1^^, [[Chenglin Xu|AUTHOR Chenglin Xu]]^^1^^, [[Joey Tianyi Zhou|AUTHOR Joey Tianyi Zhou]]^^2^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^A*STAR, Singapore; ^^3^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4069–4073&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Bandwidth extension aims to reconstruct wideband speech signals from narrowband inputs to improve perceptual quality. Prior studies mostly perform bandwidth extension under the assumption that the narrowband signals are clean without noise. The use of such extension techniques is greatly limited in practice when signals are corrupted by noise. To alleviate such problem, we propose an end-to-end time-domain framework for noise-robust bandwidth extension, that jointly optimizes a mask-based speech enhancement and an ideal bandwidth extension module with multi-task learning. The proposed framework avoids decomposing the signals into magnitude and phase spectra, therefore, requires no phase estimation. Experimental results show that the proposed method achieves 14.3% and 15.8% relative improvements over the best baseline in terms of perceptual evaluation of speech quality (PESQ) and log-spectral distortion (LSD), respectively. Furthermore, our method is 3 times more compact than the best baseline in terms of the number of parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kevin Hirschi|AUTHOR Kevin Hirschi]]^^1^^, [[Okim Kang|AUTHOR Okim Kang]]^^1^^, [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]]^^2^^, [[John H.L. Hansen|AUTHOR John H.L. Hansen]]^^3^^, [[Keelan Evanini|AUTHOR Keelan Evanini]]^^4^^, [[Helmer Strik|AUTHOR Helmer Strik]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northern Arizona University, USA; ^^2^^Radboud Universiteit, The Netherlands; ^^3^^University of Texas at Dallas, USA; ^^4^^Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4452–4456&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The use of Mobile-Assisted Pronunciation Training (MAPT) has been increasing drastically due to the personal and interactive nature of mobile devices. However, MAPT applications lack support from empirical evidence as research on MAPT-based acquisition, particularly related to prosody, has been rare. The present study employs a MAPT application with lessons on lexical stress and prominence with Limited English Proficiency (LEP) users (n = 31) of mixed ages and first languages. Then, 16 experienced raters conducted discourse-based prosodic analysis on unconstrained speech collected at the beginning and the end of the intervention. A series of mixed-effect model analyses were conducted on learner effort, improvement and learner background to investigate their relationship with accentedness and comprehensibility. The results indicated that present MAPT prosody interventions were effective for comprehensibility but not accentedness, however, learner effort on lexical stress and prominence exhibit differing patterns. Similar to previous findings, learner age impacts production more than the length of residency or history of language study. Implications include a prosody-based MAPT application; support for the treatment of accentedness and comprehensibility as separate, but related constructs; and a further understanding of the role of learner-related factors in prosody intervention.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel R. van Niekerk|AUTHOR Daniel R. van Niekerk]]^^1^^, [[Anqi Xu|AUTHOR Anqi Xu]]^^1^^, [[Branislav Gerazov|AUTHOR Branislav Gerazov]]^^2^^, [[Paul K. Krug|AUTHOR Paul K. Krug]]^^3^^, [[Peter Birkholz|AUTHOR Peter Birkholz]]^^3^^, [[Yi Xu|AUTHOR Yi Xu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College London, UK; ^^2^^UKiM, Macedonia; ^^3^^Technische Universität Dresden, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4457–4461&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, a state-of-the-art articulatory speech synthesiser was used as the basis for simulating the exploration of CV sounds imitating speech stimuli. By adopting a relevant kinematic model and systematically reducing the search space of consonant articulatory targets, intelligible CV sounds can be found. Derivative-free optimisation strategies were evaluated to speed up the process of exploring articulatory space and the possibility of using automatic speech recognition as a means of evaluating intelligibility was explored.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Venkat Krishnamohan|AUTHOR Venkat Krishnamohan]]^^1^^, [[Akshara Soman|AUTHOR Akshara Soman]]^^1^^, [[Anshul Gupta|AUTHOR Anshul Gupta]]^^2^^, [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^Mercedes-Benz, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4462–4466&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audiovisual correspondence learning is the task of acquiring the association between images and its corresponding audio. In this paper, we propose a novel experimental paradigm in which unfamiliar pseudo images and pseudowords in audio form are introduced to both humans and machine systems. The task is to learn the association between the pairs of image and audio which is later evaluated with a retrieval task. The machine system used in the study is pretrained with the ImageNet corpus along with the corresponding audio labels. This model is transfer learned for the new image-audio pairs. Using the proposed paradigm, we perform a direct comparison of one-shot, two-shot and three-shot learning performance for humans and machine systems. The human behavioral experiment confirms that the majority of the correspondence learning happens in the first exposure of the audio-visual pair. This paper proposes a machine model which performs on par with the humans in audiovisual correspondence learning. But compared to the machine model, humans exhibited better generalization ability for new input samples with a single exposure.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yizhou Lan|AUTHOR Yizhou Lan]]
</p><p class="cpabstractcardaffiliationlist">Shenzhen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4467–4470&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>70 Mandarin-speaking advanced learners of English (level B2 and above) participated in a perceptual identification experiment eliciting their preferred Mandarin equivalent classifications of English fricatives and affricates (/s, ʃ, ʧ, ʤ, tr, dr, ʒ/) along with fitness rates. The degree of mapping between Mandarin and English consonants, ranging from poor to fair, and good, were compared against predictions by the Perceptual Learning Model, a theoretic model that predicts learning outcomes by phonetic distances. Overall, the perceived phonetic distances between Mandarin and English consonants predicted the learners’ correct identification of the L2 consonants except for a few number of individual sounds. The Findings suggest that phonetic similarity do predict most mappings as the learning models postulate, but other factors such as articulatory proximity and orthographic influences should be considered, too.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kimiko Tsukada|AUTHOR Kimiko Tsukada]]^^1^^, [[Joo-Yeon Kim|AUTHOR Joo-Yeon Kim]]^^2^^, [[Jeong-Im Han|AUTHOR Jeong-Im Han]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Macquarie University, Australia; ^^2^^Konkuk University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4471–4475&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The perception of Japanese consonant length contrasts (i.e. short/singleton vs long/geminate) by native and non-native listeners was compared to examine the extent to which difficult foreign language (FL) sounds are processed accurately by native speakers of Korean (NK). Three NK groups differed in their experience with Japanese: non-learners, intermediate and advanced. Via the AXB task, the NK speakers’ discrimination accuracy of Japanese consonant length contrasts was assessed and compared to that of a group of 10 native speakers of Japanese (NJ) who served as controls. On average, the NK advanced group did not significantly differ from the NJ group and outperformed the NK non-learner (but not the NK intermediate) group. The NK intermediate and non-learner groups did not differ from each other. However, regardless of experience with Japanese, the NK speakers may benefit from the first language (L1) laryngeal contrasts, associating L1 Korean fortis consonants with Japanese geminates. The NK advanced group appeared less affected than the other two NK groups by Japanese pitch accent patterns in their consonant length perception. The NK advanced learners’ results demonstrate that it is possible for non-native speakers to acquire native-like discrimination of consonant length in adulthood.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Si-Ioi Ng|AUTHOR Si-Ioi Ng]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4476–4480&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech sound disorder (SSD) refers to the developmental disorder in which children encounter persistent difficulties in correctly pronouncing words. Assessment of SSD has been relying largely on trained speech and language pathologists (SLPs). With the increasing demand for and long-lasting shortage of SLPs, automated assessment of speech disorder becomes a highly desirable approach to assisting clinical work. This paper describes a study on automatic detection of phonological errors in Cantonese speech of kindergarten children, based on a newly collected large speech corpus. The proposed approach to speech error detection involves the use of a Siamese recurrent autoencoder, which is trained to learn the similarity and discrepancy between phone segments in the embedding space. Training of the model requires only speech data from typically developing (TD) children. To distinguish disordered speech from typical one, cosine distance between the embeddings of the test segment and the reference segment is computed. Different model architectures and training strategies are experimented. Results on detecting the 6 most common consonant errors demonstrate satisfactory performance of the proposed model, with the average precision value from 0.82 to 0.93.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongwei Ding|AUTHOR Hongwei Ding]]^^1^^, [[Binghuai Lin|AUTHOR Binghuai Lin]]^^2^^, [[Liyuan Wang|AUTHOR Liyuan Wang]]^^2^^, [[Hui Wang|AUTHOR Hui Wang]]^^1^^, [[Ruomei Fang|AUTHOR Ruomei Fang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SJTU, China; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4481–4485&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prosodic speech characteristics are important in the evaluation of both intelligibility and naturalness of oral English proficiency levels for learners of English as a Second Language (ESL). Different stress patterns between English and Mandarin Chinese have been an important research topic for L2 (second language) English speech learning. However, previous studies seldom employed children as ESL learners on this topic. Since more and more children start to learn English in the primary school in China, the current study aims to examine the L2 English rhythm of these child learners. We carefully selected 273 English utterances from a speech database produced by both native speakers and Mandarin child learners, and measured the rhythmic correlates. Results suggested that vowel-related metrics (e.g. //nPVI//) are better indexes for L2 rhythmic evaluation, which is similar for ESL adults; pause-related fluency is another indication for prosodic assessment, especially for child ESL learners. This investigation could shed some light on the rhythmic difficulties for Mandarin ESL child learners and provide some implications for ESL prosody teaching for school children.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chao Zhou|AUTHOR Chao Zhou]]^^1^^, [[Silke Hamann|AUTHOR Silke Hamann]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidade de Lisboa, Portugal; ^^2^^Universiteit van Amsterdam, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4486–4490&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prior research has revealed that L1-Mandarin learners employed position-dependent repair strategies for European Portuguese /l/ and /ɾ/. In this study we examined whether this L2 prosodic effect can be attributed to a cross-linguistic influence and whether the replacement of the Portuguese rhotic by the Mandarin [ɹ] is due to perception or orthography. We performed a delayed imitation task with naïve Mandarin listeners and manipulated the presented input types (auditory form alone or a combination of auditory and written forms). Results showed that naïve responses were reminiscent of L1-Mandarin learners’ behaviour, and that [ɹ] was used almost exclusively in the presence of written input, suggesting that the prosodic effect attested in L2 acquisition of European Portuguese /l/ and /ɾ/ stems from cross-linguistic interaction between phonological categorization and orthography.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenqian Li|AUTHOR Wenqian Li]]^^1^^, [[Jung-Yueh Tu|AUTHOR Jung-Yueh Tu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SJTU, China; ^^2^^National Chengchi University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4491–4495&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigated the cross-linguistic perception of attitudinal intonation with willingness and reluctance in Mandarin by Korean L2 learners. In the current study, 20 Korean L2 learners of Mandarin (KL2) and 20 native Mandarin listeners (CL1) were instructed to rate perceived degree of willingness (1–5 Likert scale) from the utterances (with willingness, reluctance, and neutrality) produced by 2 native Mandarin speakers (one male and one female). The rating results showed that 1) the rating scores of willing attitude were significantly higher than those of reluctant attitude by KL2; 2) utterances of willingness and neutrality tend to be perceived less willing by KL2 than by CL1; 3) KL2 had a narrower rating range on the perception of attitudinal intonation than CL1. Specifically, Korean females had a wider rating range than Korean males. The findings indicated that 1) utterances of willingness, neutrality, and reluctance in Mandarin were accurately perceived by KL2; 2) willingness carried by attitudinal intonation was weakened through L2 pragmatic comprehension by KL2; 3) Korean females were more sensitive than Korean males on the perception of attitudinal intonation. The overall results suggest significant effects of language experience and gender difference on the perception of Chinese utterances with willingness and reluctance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rui Cheng|AUTHOR Rui Cheng]], [[Changchun Bao|AUTHOR Changchun Bao]]
</p><p class="cpabstractcardaffiliationlist">BJUT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4496–4500&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement is an indispensable technology in the field of speech interaction. With the development of microphone array signal processing technology and deep learning, the beamforming combined with neural network has provided a more diverse solution for this field. In this paper, a multi-channel speech enhancement method is proposed, which combines beamforming and post-filtering based on neural network. The spatial features and phase information of target speech are incorporated into the beamforming by neural network, and a neural network based single-channel post-filtering with the phase correction is further combined to improve the performance. The experiments at different signal-to-noise ratio (SNR) levels confirmed that the proposed method results in an obvious improvement on speech quality and intelligibility compared to the reference methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mathieu Fontaine|AUTHOR Mathieu Fontaine]], [[Kouhei Sekiguchi|AUTHOR Kouhei Sekiguchi]], [[Aditya Arie Nugraha|AUTHOR Aditya Arie Nugraha]], [[Kazuyoshi Yoshii|AUTHOR Kazuyoshi Yoshii]]
</p><p class="cpabstractcardaffiliationlist">RIKEN, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4541–4545&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes multichannel speech enhancement based on a probabilistic model of complex source spectrograms for improving the intelligibility of speech corrupted by undesired noise. The univariate complex Gaussian model with the reproductive property supports the additivity of source complex spectrograms and forms the theoretical basis of nonnegative matrix factorization (NMF). Multichannel NMF (MNMF) is an extension of NMF based on the multivariate complex Gaussian model with spatial covariance matrices (SCMs), and its state-of-the-art variant called FastMNMF with jointly-diagonalizable SCMs achieves faster decomposition based on the univariate Gaussian model in the transformed domain where all time-frequency-channel elements are independent. Although a heavy-tailed extension of FastMNMF has been proposed to improve the robustness against impulsive noise, the source additivity has never been considered. The multivariate α-stable distribution does not have the reproductive property for the shape matrix parameter. This paper, therefore, proposes a heavy-tailed extension called α-stable FastMNMF which works in the transformed domain to use a univariate complex α-stable model, satisfying the reproductive property for any tail lightness parameter α and allowing the α-fractional Wiener filtering based on the element-wise source additivity. The experimental results show that α-stable FastMNMF with α = 1.8 significantly outperforms Gaussian FastMNMF (α=2).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-Xuan Wang|AUTHOR Yu-Xuan Wang]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Li Chai|AUTHOR Li Chai]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^, [[Jia Pan|AUTHOR Jia Pan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4501–4505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a novel noise-aware memory-attention network (NAMAN) for regression-based speech enhancement, aiming at improving quality of enhanced speech in unseen noise conditions. The NAMAN architecture consists of three parts, a main regression network, a memory block and an attention block. First, a long short-term memory recurrent neural network (LSTM-RNN) is adopted as the main network to well model the acoustic context of neighboring frames. Next, the memory block is built with an extensive set of noise feature vectors as the prior noise bases. Finally, the attention block serves as an auxiliary network to improve the noise awareness of the main network by encoding the dynamic noise information at frame level through additional features obtained by weighing the existing noise basis vectors in the memory block. Our experiments show that the proposed NAMAN framework is compact and outperforms the state-of-the-art dynamic noise-aware training approaches in low SNR conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiaqi Su|AUTHOR Jiaqi Su]]^^1^^, [[Zeyu Jin|AUTHOR Zeyu Jin]]^^2^^, [[Adam Finkelstein|AUTHOR Adam Finkelstein]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Princeton University, USA; ^^2^^Adobe, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4506–4510&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Real-world audio recordings are often degraded by factors such as noise, reverberation, and equalization distortion. This paper introduces HiFi-GAN, a deep learning method to transform recorded speech to sound as though it had been recorded in a studio. We use an end-to-end feed-forward WaveNet architecture, trained with multi-scale adversarial discriminators in both the time domain and the time-frequency domain. It relies on the deep feature matching losses of the discriminators to improve the perceptual quality of enhanced speech. The proposed model generalizes well to new speakers, new speech content, and new environments. It significantly outperforms state-of-the-art baseline methods in both objective and subjective experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ashutosh Pandey|AUTHOR Ashutosh Pandey]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4511–4515&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It is recently revealed that deep learning based speech enhancement systems do not generalize to untrained corpora in low signal-to-noise ratio (SNR) conditions, mainly due to the channel mismatch between trained and untrained corpora. In this study, we investigate techniques to improve cross-corpus generalization of complex spectrogram enhancement. First, we propose a long short-term memory (LSTM) network for complex spectral mapping. Evaluated on untrained noises and corpora, the proposed network substantially outperforms a state-of-the-art gated convolutional recurrent network (GCRN). Next, we examine the importance of training corpus for cross-corpus generalization. It is found that a training corpus that contains utterances with different channels can significantly improve performance on untrained corpora. Finally, we observe that using a smaller frame shift in short-time Fourier transform (STFT) is a simple but highly effective technique to improve cross-corpus generalization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Julius Richter|AUTHOR Julius Richter]], [[Guillaume Carbajal|AUTHOR Guillaume Carbajal]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4516–4520&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the problem of speech modeling in speech enhancement. Recently, deep generative approaches based on variational autoencoders have been proposed to model speech spectrograms. However, these approaches are based either on hierarchical or temporal dependencies of stochastic latent variables. In this paper, we propose a generative approach to speech enhancement based on a stochastic temporal convolutional network, which combines both hierarchical and temporal dependencies of stochastic variables. We evaluate our method with real recordings of different noisy environments. The proposed speech enhancement method outperforms a previous non-sequential approach based on feed-forward fully-connected networks in terms of speech distortion, instrumental speech quality and intelligibility. At the same time, the computational cost of the proposed generative speech model remains feasible, due to inherent parallelism of the convolutional architecture.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mandar Gogate|AUTHOR Mandar Gogate]], [[Kia Dashtipour|AUTHOR Kia Dashtipour]], [[Amir Hussain|AUTHOR Amir Hussain]]
</p><p class="cpabstractcardaffiliationlist">Edinburgh Napier University, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4521–4525&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present VIsual Speech In real nOisy eNvironments (VISION), a first of its kind audio-visual (AV) corpus comprising 2500 utterances from 209 speakers, recorded in real noisy environments including social gatherings, streets, cafeterias and restaurants. While a number of speech enhancement frameworks have been proposed in the literature that exploit AV cues, there are no visual speech corpora recorded in real environments with a sufficient variety of speakers, to enable evaluation of AV frameworks’ generalisation capability in a wide range of background visual and acoustic noises. The main purpose of our AV corpus is to foster research in the area of AV signal processing and to provide a benchmark corpus that can be used for reliable evaluation of AV speech enhancement systems in everyday noisy settings. In addition, we present a baseline deep neural network (DNN) based spectral mask estimation model for speech enhancement. Comparative simulation results with subjective listening tests demonstrate significant performance improvement of the baseline DNN compared to state-of-the-art speech enhancement approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aswin Sivaraman|AUTHOR Aswin Sivaraman]], [[Minje Kim|AUTHOR Minje Kim]]
</p><p class="cpabstractcardaffiliationlist">Indiana University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4526–4530&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work proposes a novel approach for reducing the computational complexity of speech denoising neural networks by using a sparsely active ensemble topology. In our ensemble networks, a gating module classifies an input noisy speech signal either by identifying speaker gender or by estimating signal degradation, and exclusively assigns it to a best-case specialist module, optimized to denoise a particular subset of the training data. This approach extends the hypothesis that speech denoising can be simplified if it is split into non-overlapping subproblems, contrasting earlier approaches that train large generalist neural networks to address a wide range of noisy speech data. We compare a baseline recurrent network against an ensemble of similarly designed, but smaller networks. Each network module is trained independently and combined to form a naïve ensemble. This can be further fine-tuned using a sparsity parameter to improve performance. Our experiments on noisy speech data — generated by mixing LibriSpeech and MUSAN datasets — demonstrate that a fine-tuned sparsely active ensemble can outperform a generalist using significantly fewer calculations. The key insight of this paper, leveraging model selection as a form of network compression, may be used to supplement already-existing deep learning methods for speech denoising.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vinith Kishore|AUTHOR Vinith Kishore]], [[Nitya Tiwari|AUTHOR Nitya Tiwari]], [[Periyasamy Paramasivam|AUTHOR Periyasamy Paramasivam]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4531–4535&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A deep learning based time domain single-channel speech enhancement technique using multilayer encoder-decoder and a temporal convolutional network is proposed for use in applications such as smart speakers and voice assistants. The technique uses encoder-decoder with convolutional layers for obtaining representation suitable for speech enhancement and a temporal convolutional network (TCN) based separator between the encoder and decoder to learn long-range dependencies. The technique derives inspiration from speech separation techniques that use TCN based separator between a single layer encoder-decoder. We propose to use a multilayer encoder-decoder to obtain a noise-independent representation useful for separating clean speech and noise. We present t-SNE-based analysis of the representation learned using different architectures for selecting the optimal number of encoder-decoder layers. We evaluate the proposed architectures using an objective measure of speech quality, scale-invariant source-to-noise ratio, and by obtaining word error rate on a speech recognition platform. The proposed two-layer encoder-decoder architecture resulted in 48% improvement in WER over unprocessed noisy data and 33% and 44% improvement in WER over two baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4536–4540&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Monaural speech dereverberation is a very challenging task because no spatial cues can be used. When the additive noises exist, this task becomes more challenging. In this paper, we propose a joint training method for simultaneous speech denoising and dereverberation using deep embedding representations. Firstly, at the denoising stage, the deep clustering (DC) network is used to extract noise-free deep embedding representations from the anechoic speech and residual reverberation signals. These deep embedding representations are represent the inferred spectral masking patterns of the desired signals so that they could discriminate the anechoic speech and the reverberant signals very well. Secondly, at the dereverberation stage, we utilize another supervised neural network to estimate the mask of anechoic speech from these deep embedding representations. Finally, the joint training algorithm is used to train the speech denoising and dereverberation network. Therefore, the noise reduction and dereverberation can be simultaneously optimized. Our experiments are conducted on the TIMIT dataset. Experimental results show that the proposed method outperforms the WPE and BLSTM baselines. Especially in the low SNR (-5 dB) condition, our proposed method produces a relative improvement of 7.8% for PESQ compared with BLSTM method and relative reductions of 16.3% and 19.3% for CD and LLR measures.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Huang|AUTHOR Jian Huang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Zheng Lian|AUTHOR Zheng Lian]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4079–4083&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion is high-level paralinguistic information characteristics in speech. The most essential part of speech emotion recognition is to generate robust utterance-level emotional feature representations. The commonly used approaches are pooling methods based on various models, which may lead to the loss of detailed information for emotion classification. In this paper, we utilize the NetVLAD as trainable discriminative clustering to aggregate frame-level descriptors into a single utterance-level vector. In addition, to relieve the influence of imbalanced emotional classes, we utilize unigram label smoothing with prior emotional class distribution to regularize the model. Our experimental results on the Interactive Emotional Motion Capture (IEMOCAP) database reveal that our proposed methods are beneficial to performance improvement, which is 3% better than other models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]], [[Rosanna Milner|AUTHOR Rosanna Milner]], [[Thomas Hain|AUTHOR Thomas Hain]], [[Roger K. Moore|AUTHOR Roger K. Moore]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4084–4088&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition is essential for obtaining emotional intelligence which affects the understanding of context and meaning of speech. The fundamental challenges of speech emotion recognition from a machine learning standpoint is to extract patterns which carry maximum correlation with the emotion information encoded in this signal, and to be as insensitive as possible to other types of information carried by speech. In this paper, a novel recurrent residual temporal context modelling framework is proposed. The framework includes mixture of multi-view attention smoothing and high dimensional feature projection for context expansion and learning feature representations. The framework is designed to be robust to changes in speaker and other distortions, and it provides state-of-the-art results for speech emotion recognition. Performance of the proposed approach is compared with a wide range of current architectures in a standard 4-class classification task on the widely used IEMOCAP corpus. A significant improvement of 4% unweighted accuracy over state-of-the-art systems is observed. Additionally, the attention vectors have been aligned with the input segments and plotted at two different attention levels to demonstrate the effectiveness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weiquan Fan|AUTHOR Weiquan Fan]]^^1^^, [[Xiangmin Xu|AUTHOR Xiangmin Xu]]^^1^^, [[Xiaofen Xing|AUTHOR Xiaofen Xing]]^^1^^, [[Dongyan Huang|AUTHOR Dongyan Huang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SCUT, China; ^^2^^UBTECH Robotics, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4089–4093&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition is a crucial part in human-computer interaction. However, representation learning is challenging due to much variability from speech emotion signals across diverse domains, such as gender, age, languages, and social cultural context. Many approaches focus on domain-invariant representation learning which loses the domain-specific knowledge and results in unsatisfactory speech emotion recognition across domains. In this paper, we propose an adaptive domain-aware representation learning that leverages the domain knowledge to extract domain aware features. The proposed approach applies attention model on frequency to embed the domain knowledge in the emotion representation space. Experiments demonstrate that our approach on IEMOCAP achieves the state-of-the-art performance under the same experimental conditions with WA of 73.02% and UA of 65.86%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huan Zhou|AUTHOR Huan Zhou]], [[Kai Liu|AUTHOR Kai Liu]]
</p><p class="cpabstractcardaffiliationlist">Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4094–4097&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of a speech emotion recognition (SER) system heavily relies on the deep feature learned from the speeches. Most state of the art has focused on developing various deep architectures for effective feature learning. In this study, we make the first attempt to explore feature discriminability instead. Based on our SER baseline system, we propose three approaches, two on loss functions and one on combined attentive pooling, to enhance feature discriminability. Evaluations on IEMOCAP database consistently validate the effectiveness of all our proposals. Compared to the baseline system, the proposed three systems demonstrated at least +4.0% absolute improvements in accuracy, with no increment in the total number of parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hengshun Zhou|AUTHOR Hengshun Zhou]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Yan-Hui Tu|AUTHOR Yan-Hui Tu]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4098–4102&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we investigate the effects of deep learning (DL)-based speech enhancement (SE) on speech emotion recognition (SER) in realistic environments. First, we use emotion speech data to train regression-based speech enhancement models which is shown to be beneficial to noisy speech emotion recognition. Next, to improve the model generalization capability of the regression model, an LSTM architecture with a design of hidden layers via simply densely-connected progressive learning, is adopted for the enhancement model. Finally, a post-processor utilizing an improved speech presence probability to estimate masks from the above proposed LSTM structure is shown to further improves recognition accuracies. Experiments results on the IEMOCAP and CHEAVD 2.0 corpora demonstrate that the proposed framework can yield consistent and significant improvements over the systems using unprocessed noisy speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yongwei Li|AUTHOR Yongwei Li]]^^1^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^, [[Bin Liu|AUTHOR Bin Liu]]^^1^^, [[Donna Erickson|AUTHOR Donna Erickson]]^^2^^, [[Masato Akagi|AUTHOR Masato Akagi]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Haskins Laboratories, USA; ^^3^^JAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4103–4107&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Since glottal source plays an important role for expressing emotions in speech, it is crucial to compare a set of glottal source parameter values to find differences in these expressions of emotions for emotional speech recognition and synthesis. This paper focuses on comparing a set of glottal source parameter values among varieties of emotional vowels /a/ (joy, neutral, anger, and sadness) using an improved ARX-LF model algorithm. The set of glottal source parameters included in the comparison were T,,p,,, T,,e,,, T,,a,,, E,,e,,, and F,,0,,(1/T,,0,,) in the LF model; parameter values were divided into 5 levels according to that of neutral vowel. Results showed that each emotion has its own levels for each set of the glottal source parameter value. These findings could be used for emotional speech recognition and synthesis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huang-Cheng Chou|AUTHOR Huang-Cheng Chou]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4108–4112&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An individual’s emotion perception plays a key role in affecting our decision-making and task performances. Previous speech emotion recognition research focuses mainly on recognizing the emotion label derived from the majority vote (hard label) of the speaker (i.e., producer) but not on recognizing per-rater’s emotion perception. In this work, we propose a framework that integrates different viewpoints of emotion perception from other co-raters (exclude target rater) using soft and hard label learning to improve target rater’s emotion perception recognition. Our methods achieve [3.97%, 1.48%] and [1.71%, 2.87%] improvement on average unweighted accuracy recall (UAR) on the three-class (low, middle, and high class) [valence, activation (arousal)] emotion recognition task for four different raters on the IEMOCAP and the NNIME databases, respectively. Further analyses show that learning from the soft label of co-raters provides the most robust accuracy even without obtaining the target rater’s labels. By simply adding 50% of a target raters annotation, our framework performance mostly surpasses the model trained with 100% of raters annotations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]], [[Rosanna Milner|AUTHOR Rosanna Milner]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4113–4117&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition is essential for obtaining emotional intelligence which affects the understanding of context and meaning of speech. Harmonically structured vowel and consonant sounds add indexical and linguistic cues in spoken information. Previous research argued whether vowel sound cues were more important in carrying the emotional context from a psychological and linguistic point of view. Other research also claimed that emotion information could exist in small overlapping acoustic cues. However, these claims are not corroborated in computational speech emotion recognition systems. In this research, a convolution-based model and a long-short-term memory-based model, both using attention, are applied to investigate these theories of speech emotion on computational models. The role of acoustic context and word importance is demonstrated for the task of speech emotion recognition. The IEMOCAP corpus is evaluated by the proposed models, and 80.1% unweighted accuracy is achieved on pure acoustic data which is higher than current state-of-the-art models on this task. The phones and words are mapped to the attention vectors and it is seen that the vowel sounds are more important for defining emotion acoustic cues than the consonants, and the model can assign word importance based on acoustic context.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Iona Gessinger|AUTHOR Iona Gessinger]]^^1^^, [[Bernd Möbius|AUTHOR Bernd Möbius]]^^1^^, [[Bistra Andreeva|AUTHOR Bistra Andreeva]]^^1^^, [[Eran Raveh|AUTHOR Eran Raveh]]^^1^^, [[Ingmar Steiner|AUTHOR Ingmar Steiner]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität des Saarlandes, Germany; ^^2^^audEERING, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4118–4122&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present paper compares phonetic accommodation of L1 French speakers in interaction with the simulated virtual language learning tutor for German, Mirabella, to that of L1 German speakers from a previous study. In a question-and-answer exchange, the L1 French speakers adapted the intonation contours of wh-questions as falling or rising according to the variant produced by Mirabella. However, they were not sensitive to a change of the nuclear pitch accent placement. In a map task, the L1 French speakers increased the number of dispreferred variants for the allophonic contrast [ɪç] vs. [ɪk] in the word ending  ⟨-ig⟩ when Mirabella used this variant. For the contrast [εː] vs. [eː] as a realization of stressed ⟨-ä-⟩, such a convergence effect was not found. Overall, the non-native speakers showed a similar degree of accommodative behavior towards Mirabella as the L1 German speakers. This suggests that incidental inductive learning through accommodation is possible. However, phenomena of the target language that deviate too radically from the native pattern seem to require more explicit training. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yike Yang|AUTHOR Yike Yang]], [[Si Chen|AUTHOR Si Chen]], [[Xi Chen|AUTHOR Xi Chen]]
</p><p class="cpabstractcardaffiliationlist">PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4163–4167&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cross-linguistic differences of F0 patterns have been found from both monolingual and bilingual speakers. However, previous studies either worked on intonation languages or compared an intonation language with a tone language. It still remains unknown whether there are F0 differences in bilingual speakers of tone languages. This study compared second language (L2) Mandarin with Cantonese and first language (L1) Mandarin, to test whether the L2 speakers of Mandarin have acquired the F0 patterns of Mandarin and whether there are influences from their L1 Cantonese. Different F0 measurements (including maximum F0, minimum F0, mean F0 and F0 range) were examined with linear mixed-effects models. Cantonese and Mandarin showed different F0 patterns, the source of which still requires further investigation. The L2 Mandarin data resembled the F0 patterns of Cantonese and were different from L1 Mandarin, for which we provided different explanations: assimilation of L1 Cantonese and L2 Mandarin, the negative transfer from native Cantonese, and similarities in the nature of tone languages. Suggestions for testing these assumptions are proposed. Lastly, our data provided conflicting results concerning the role of gender in F0 pattern realisation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuling Gu|AUTHOR Yuling Gu]]^^1^^, [[Nancy F. Chen|AUTHOR Nancy F. Chen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NYU, USA; ^^2^^A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4123–4127&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we investigate pronunciation differences in English spoken by Singaporean children in relation to their American and British counterparts by conducting archetypal clustering and formant space analysis on selected vowel pairs. Given that Singapore adopts British English as the institutional standard due to historical reasons, one might expect Singaporean children to follow British pronunciation patterns, but interestingly we observe that Singaporean children present similar patterns to American children when it comes to TRAP–BATH split vowels and /æ/ vs. /ε/ productions: Singaporean and American speakers both exhibit more fronted characteristics (p < 0.001) for vowels in these vowel pairs, resulting in less contrast compared to British speakers. In addition, when producing these vowels, the first formant frequency estimates of Singaporean children is consistently lower, suggesting a higher tongue position, distinguishing them from American and British speakers (p < 0.05).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Svetlana Kaminskaïa|AUTHOR Svetlana Kaminskaïa]]
</p><p class="cpabstractcardaffiliationlist">University of Waterloo, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4128–4132&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Studies of prosodic rhythm in a minority Ontario French using rhythm metrics did not demonstrate the effect of contact with English; moreover, they demonstrated an even more syllable-timed (French) pattern in this contact variety than in majority Canadian and European ones. To understand these results and further explore regional variation in Canadian French and the effect of linguistic contact, syllabic typology, length and duration of the stress group, syllable duration ratios, and vowel intensity are explored here through a comparison of a minority variety with a majority Canadian French (Quebec). Spontaneous samples show the same syllabic typology and distribution, stress group length and duration, similar syllable ratios, and a regular rhythmic pattern in both Canadian varieties. The analysis of intensity of stressed syllables, however, suggested divergence of the datasets from both traditional description of French and from each other. Thus, intensity accompanies primary stress in Ontario but not in Quebec, and both varieties use intensity to mark secondary stress. These results suggest a convergence to the neighboring English language and need to be confirmed in a controlled setting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sreeja Manghat|AUTHOR Sreeja Manghat]]^^1^^, [[Sreeram Manghat|AUTHOR Sreeram Manghat]]^^1^^, [[Tanja Schultz|AUTHOR Tanja Schultz]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Independent Researcher, India; ^^2^^Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4133–4137&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Grapheme to phoneme conversion is an integral aspect of speech processing. Conversational speech in Malayalam — a low resource Indic language has inter-sentential, intra-sentential code-switching as well as frequent intra-word code-switching with English. Monolingual G2P systems cannot process such special intra-word code-switching scenarios. A G2P system which can handle code-switching developed based on Malayalam-English code-switch speech and text corpora is presented. Since neither Malayalam nor English are phonetic subset of each other, the overlapping phonemes for English–Malayalam are identified and analysed. Additional rules used to handle special cases of Malayalam phonemes and intra-word code-switching in the G2P system is also presented specifically.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mathilde Hutin|AUTHOR Mathilde Hutin]]^^1^^, [[Adèle Jatteau|AUTHOR Adèle Jatteau]]^^2^^, [[Ioana Vasilescu|AUTHOR Ioana Vasilescu]]^^1^^, [[Lori Lamel|AUTHOR Lori Lamel]]^^1^^, [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIMSI (UPR 3251), France; ^^2^^STL (UMR 8163), France; ^^3^^LIMSI (UPR 3251), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4138–4142&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phonologization is a process whereby phonetic substance becomes phonological structure [1]. The process involves at least two steps: (i) a universal phonetic (‘automatic’) variation becomes a language-specific (‘speaker-controlled’) pattern, (ii) the language-specific pattern becomes a phonological (‘structured’) object. This paper will focus on the first step and ask the question of whether three universal phonetic variations of the laryngeal feature of word-final codas (final devoicing, voicelessness assimilation and voicing assimilation) are becoming language-specific patterns in two Romance languages, Romanian and French. Our results suggest that neutralization processes (final devoicing) might be beginning their phonologization process in both French and Romanian whereas assimilation processes (regressive assimilation of voicing and voicelessness) remain universal phonetic tendencies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maxwell Hope|AUTHOR Maxwell Hope]]^^1^^, [[Jason Lilley|AUTHOR Jason Lilley]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Delaware, USA; ^^2^^Nemours Biomedical Research, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4143–4147&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Perception of gender in voice is not an under-researched area. Previous studies have been conducted in the hopes of pinpointing what aspects of voice (e.g. fundamental frequency, intonation, etc.) carry the largest cues for skewing gender perception. These studies have to date been conducted within the framework of the gender binary, i.e. men’s vs. women’s voices, which have left out the exploration of perception of something besides simply femininity and masculinity.

The literature thus far has not endeavored to keep pitch in the “androgynous” zone while manipulating other aspects such as the F0 contour or other acoustic parameters. Additionally, past literature on speech perception has neglected to explicitly include members of the gender expansive community. Hence, we recruited participants of all genders and first sought to identify cues for gender perception in synthetically made voices, and then examined the relationship between one’s own sense of gender identity and the perception of gender in synthetically made voices for native speakers of American English. We found that vocal tract acoustics are most important for swaying perception of gender, and that one’s own gender identity influences gender perception in voice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alla Menshikova|AUTHOR Alla Menshikova]], [[Daniil Kocharov|AUTHOR Daniil Kocharov]], [[Tatiana Kachkovskaia|AUTHOR Tatiana Kachkovskaia]]
</p><p class="cpabstractcardaffiliationlist">Saint Petersburg State University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4148–4152&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It has been shown for a number of languages that speakers accommodate to each other in conversation. Such accommodation, or entrainment, reveals itself in many modalities including speech: interlocutors are found to entrain in intensity, fundamental frequency, tempo and other acoustic features. This paper presents data on speech entrainment in Russian using the standard measures for speech entrainment: proximity, convergence and synchrony. The research uses 49 dialogues from the SibLing speech corpus where speakers played a card-matching game. The list of acoustic features includes various measures of pitch, energy, spectral slope, HNR, jitter, and shimmer. The results for Russian are compared with those published previously for other languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chengwei Xu|AUTHOR Chengwei Xu]], [[Wentao Gu|AUTHOR Wentao Gu]]
</p><p class="cpabstractcardaffiliationlist">NJNU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4153–4157&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As specialized social affects in speech communication, mock politeness and mock impoliteness are usually characterized by unique prosodic patterns that conflict with the literal meanings. To give a quantitative analysis of prosodic characteristics, a context-elicited discourse completion task was conducted to collect genuine and mock (im)polite Mandarin utterances in both imperative and interrogative modes. Results revealed that prosodic features played roles in a complex way. Mock polite speech showed a higher maximum F,,0,, and intensity, a wider range as well as a higher variability of F,,0,, and intensity, a lower HNR, and a higher jitter than genuine polite speech, whereas mock impolite speech showed a lower mean/maximum F,,0,, and intensity, a narrower range as well as a lower variability of F,,0,, and intensity, a slower speech rate, a higher HNR, and lower jitter, shimmer and H1-H2 than genuine impolite speech. In the perceptual experiment, the lower identification rates on mock (im)politeness indicated that perceptual judgement was influenced by literal meanings. Politeness ratings further showed that mock (im)polite speech was less (im)polite than genuine (im)polite speech, suggesting a good correspondence between prosodic manifestations and perceived politeness. Moreover, interrogatives sounded more polite than imperatives, also verifying the Tact Maxim principle for politeness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanping Li|AUTHOR Yanping Li]], [[Catherine T. Best|AUTHOR Catherine T. Best]], [[Michael D. Tyler|AUTHOR Michael D. Tyler]], [[Denis Burnham|AUTHOR Denis Burnham]]
</p><p class="cpabstractcardaffiliationlist">Western Sydney University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4158–4162&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study investigated tone variations in regionally accented Mandarin (i.e., Standard Mandarin [SM] spoken by dialectal Chinese speakers) as influenced by the varying tone systems of their native dialects. 12 female speakers, four each from Guangzhou, Shanghai and Yantai, were recruited to produce monosyllabic words in SM that included minimal contrasts among the four Mandarin lexical tones. Since SM developed from the Beijing dialect, their pronunciations were compared to the same Mandarin words produced by four Beijing female speakers. Regional Mandarin speakers successfully produced the four Mandarin lexical tones, but their productions varied from SM. Two crucial acoustic measures for Mandarin lexical tones, F0 (fundamental frequency) and duration values, were fitted into linear mixed-effects models on differences between regional and Beijing accents. Regional speakers had longer word duration and different F0 height when producing SM, resulting in variations in Mandarin lexical tones across the regional accents. These findings shed light on regional accent variations in Mandarin lexical tones and lay a foundation for deeper understanding of their impact on perception of accented Mandarin lexical tones by native (Beijing) Mandarin listeners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yung-Sung Chuang|AUTHOR Yung-Sung Chuang]], [[Chi-Liang Liu|AUTHOR Chi-Liang Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4168–4172&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While various end-to-end models for spoken language understanding tasks have been explored recently, this paper is probably the first known attempt to challenge the very difficult task of end-to-end spoken question answering (SQA). Learning from the very successful BERT model for various text processing tasks, here we proposed an audio-and-text jointly learned SpeechBERT model. This model outperformed the conventional approach of cascading ASR with the following text question answering (TQA) model on datasets including ASR errors in answer spans, because the end-to-end model was shown to be able to extract information out of audio data before ASR produced errors. When ensembling the proposed end-to-end model with the cascade architecture, even better performance was achieved. In addition to the potential of end-to-end SQA, the SpeechBERT can also be considered for many other spoken language understanding tasks just as BERT for many text processing tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chia-Chih Kuo|AUTHOR Chia-Chih Kuo]], [[Shang-Bao Luo|AUTHOR Shang-Bao Luo]], [[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]]
</p><p class="cpabstractcardaffiliationlist">NTUST</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4173–4177&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a spoken multiple-choice question answering (SMCQA) task, given a passage, a question, and multiple choices all in the form of speech, the machine needs to pick the correct choice to answer the question. While the audio could contain useful cues for SMCQA, usually only the auto-transcribed text is utilized in system development. Thanks to the large-scaled pre-trained language representation models, such as the bidirectional encoder representations from transformers (BERT), systems with only auto-transcribed text can still achieve a certain level of performance. However, previous studies have evidenced that acoustic-level statistics can offset text inaccuracies caused by the automatic speech recognition systems or representation inadequacy lurking in word embedding generators, thereby making the SMCQA system robust. Along the line of research, this study concentrates on designing a BERT-based SMCQA framework, which not only inherits the advantages of contextualized language representations learned by BERT, but integrates the complementary acoustic-level information distilled from audio with the text-level information. Consequently, an audio-enriched BERT-based SMCQA framework is proposed. A series of experiments demonstrates remarkable improvements in accuracy over selected baselines and SOTA systems on a published Chinese SMCQA dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Binxuan Huang|AUTHOR Binxuan Huang]]^^1^^, [[Han Wang|AUTHOR Han Wang]]^^2^^, [[Tong Wang|AUTHOR Tong Wang]]^^2^^, [[Yue Liu|AUTHOR Yue Liu]]^^2^^, [[Yang Liu|AUTHOR Yang Liu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Carnegie Mellon University, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4178–4182&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Entity Linking (EL) recognizes textual mentions of entities and maps them to the corresponding entities in a Knowledge Graph (KG). In this paper, we propose a novel method for EL on short text using entity representations base on their name labels, descriptions, and other related entities in the KG. We then leverage a pre-trained BERT model to calculate the semantic similarity between the entity and the text. This method does not require a large volume of data to jointly train word and entity representations, and is easily portable to a new domain with a KG. We demonstrate that our approach outperforms previous methods on a public benchmark dataset with a large margin.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingxin Zhang|AUTHOR Mingxin Zhang]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Wenxin Hou|AUTHOR Wenxin Hou]], [[Shengzhou Gao|AUTHOR Shengzhou Gao]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]
</p><p class="cpabstractcardaffiliationlist">Tokyo Tech, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4183–4187&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The process of spoken language acquisition based on sound-image grounding has been one of the topics that has attracted the most significant interest of linguists and human scientists for decades. To understand the process and enable new possibilities for intelligent robots, we designed a spoken-language acquisition task in which a software robot learns to fulfill its desire by correctly identifying and uttering the name of its preferred object from the given images, without relying on any labeled dataset. We propose an unsupervised vision-based focusing strategy and a pre-training approach based on sound-image grounding to boost the efficiency of reinforcement learning. These ideas are motivated by the introspection that human babies first observe the world and then try actions to realize their desires. Our experiments show that the software robot can successfully acquire spoken language from spoken indications with images and dialogues. Moreover, the learning speed of reinforcement learning is significantly improved compared to several baseline approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kenta Yamamoto|AUTHOR Kenta Yamamoto]], [[Koji Inoue|AUTHOR Koji Inoue]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4188–4192&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We address character expression for spoken dialogue systems (e.g. extrovert). While conventional studies focused on controlling linguistic expressions, we focus on spoken dialogue behaviors. Specifically, the proposed model maps three character traits: extroversion, emotional instability, and politeness to four spoken dialogue behaviors: utterance amount, backchannel, filler, and switching pause length. It is costly to collect annotated data for training this kind of models. Therefore, we propose a semi-supervised learning approach to utilize not only a character impression data (labeled data) but also a corpus data (unlabeled data). Experimental results show that the proposed model expresses the target character traits through the behaviors more precisely than a baseline model that corresponds to the case of supervised learning only. Besides, we also investigate how to model unlabeled behavior (e.g. speech rate) by utilizing the advantage of semi-supervised learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaohan Shi|AUTHOR Xiaohan Shi]]^^1^^, [[Sixia Li|AUTHOR Sixia Li]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JAIST, Japan; ^^2^^JAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4193–4197&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion prediction in conversation is important for humans to conduct a fluent conversation, which is an underexplored research topic in the affective computing area. In previous studies, predicting the coming emotion only considered the context information from one single speaker. However, there are two sides of the speaker and listener in interlocutors, and their emotions are influenced by one another during the conversation. For this reason, we propose a dimensional emotion prediction model based on interactive information in conversation from both interlocutors. We investigate the effects of interactive information in four conversation situations on emotion prediction, in which emotional tendencies of interlocutors are consistent or inconsistent in both valence and arousal. The results showed that the proposed method performance better by considering the interactive context information than the ones considering one single side alone. The prediction result is affected by the conversation situations. In the situation interlocutors have consistent emotional tendency in valence and inconsistent tendency in arousal, the prediction performance of valence is the best. In the situation that interlocutors’ emotional tendency is inconsistent in both valence and arousal, the prediction performance of arousal is the best.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Asma Atamna|AUTHOR Asma Atamna]], [[Chloé Clavel|AUTHOR Chloé Clavel]]
</p><p class="cpabstractcardaffiliationlist">LTCI (UMR 5141), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4198–4202&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Natural and fluid human-robot interaction (HRI) systems rely on the robot’s ability to accurately assess the user’s //engagement// in the interaction. Current HRI systems for engagement analysis, and more broadly emotion recognition, only consider user data while discarding robot data which, in many cases, affects the user state. We present a novel recurrent neural architecture for online detection of user engagement decrease in a spontaneous HRI setting that exploits the robot data. Our architecture models the user as a distinct party in the conversation and uses the robot data as contextual information to help assess engagement. We evaluate our approach on a real-world highly imbalanced data set, where we observe up to 2.13% increase in F1 score compared to a standard gated recurrent unit (GRU).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Simone Fuscone|AUTHOR Simone Fuscone]]^^1^^, [[Benoit Favre|AUTHOR Benoit Favre]]^^2^^, [[Laurent Prévot|AUTHOR Laurent Prévot]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LPL (UMR 7309), France; ^^2^^LIS (UMR 7020), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4203–4207&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Predicting the acoustic and linguistic parameters of an upcoming conversational turn is important for dialogue systems aiming to include low-level adaptation with the user. It is known that during an interaction speakers could influence each other speech production. However, the precise dynamics of the phenomena is not well-established, especially in the context of natural conversations. We developed a model based on an RNN architecture that predicts speech variables (Energy, F0 range and Speech Rate) of the upcoming turn using a representation vector describing speech information of previous turns. We compare the prediction performances when using a dialogical history (from both participants) vs. monological history (from only upcoming turn’s speaker). We found that the information contained in previous turns produced by both the speaker and his interlocutor reduce the error in predicting current acoustic target variable. In addition the error in prediction decreases as increases the number of previous turns taken into account.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shengli Hu|AUTHOR Shengli Hu]]
</p><p class="cpabstractcardaffiliationlist">Dataminr, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4208–4212&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate and explore the interplay of credibility and expertise level in text and speech. We collect a unique domain-specific multimodal dataset and analyze a set of acoustic-prosodic and linguistic features in both credible and less credible speech by professionals of varying expertise levels. Our analyses shed light on potential indicators of domain-specific perceived credibility and expertise, as well as the interplay in-between. Moreover, we build multimodal and multi-task deep learning models that outperform human performance by 6.2% in credibility and 3.8% in expertise level, building upon state-of-the-art self-supervised pre-trained language models. To our knowledge, this is the first multimodal multi-task study that analyzes and predicts domain-specific credibility and expertise level at the same time.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Krishna D. N.|AUTHOR Krishna D. N.]], [[Ankita Patil|AUTHOR Ankita Patil]]
</p><p class="cpabstractcardaffiliationlist">HashCut, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4243–4247&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose a new approach for multimodal emotion recognition using cross-modal attention and raw waveform based convolutional neural networks. Our approach uses audio and text information to predict the emotion label. We use an audio encoder to process the raw audio waveform to extract high-level features from the audio, and we use text encoder to extract high-level semantic information from text. We use cross-modal attention where the features from audio encoder attend to the features from text encoder and vice versa. This helps in developing interaction between speech and text sequences to extract most relevant features for emotion recognition. Our experiments show that the proposed approach obtains the state of the art results on IEMOCAP dataset [1]. We obtain 1.9% absolute improvement in accuracy compared to the previous state of the art method [2]. Our proposed approach uses 1D convolutional neural network to process the raw waveform instead of spectrogram features. Our experiments also shows that processing raw waveform gives a 0.54% improvement over spectrogram based modal.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rishika Agarwal|AUTHOR Rishika Agarwal]], [[Xiaochuan Niu|AUTHOR Xiaochuan Niu]], [[Pranay Dighe|AUTHOR Pranay Dighe]], [[Srikanth Vishnubhotla|AUTHOR Srikanth Vishnubhotla]], [[Sameer Badaskar|AUTHOR Sameer Badaskar]], [[Devang Naik|AUTHOR Devang Naik]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4288–4292&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>False triggers in voice assistants are unintended invocations of the assistant, which not only degrade the user experience but may also compromise privacy. False trigger mitigation (FTM) is a process to detect the false trigger events and respond appropriately to the user. In this paper, we propose a novel solution to the FTM problem by introducing a parallel ASR decoding process with a special language model trained from “out-of-domain” data sources. Such language model is complementary to the existing language model optimized for the assistant task. A bidirectional lattice RNN (Bi-LRNN) classifier trained from the lattices generated by the complementary language model shows a 38.34% relative reduction of the false trigger (FT) rate at the fixed rate of 0.4% false suppression (FS) of correct invocations, compared to the current Bi-LRNN model. In addition, we propose to train a parallel Bi-LRNN model based on the decoding lattices from both language models, and examine various ways of implementation. The resulting model leads to further reduction in the false trigger rate by 10.8%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Potsawee Manakul|AUTHOR Potsawee Manakul]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Linlin Wang|AUTHOR Linlin Wang]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4248–4252&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Abstractive summarization is a standard task for written documents, such as news articles. Applying summarization schemes to spoken documents is more challenging, especially in situations involving human interactions, such as meetings. Here, utterances tend not to form complete sentences and sometimes contain little information. Moreover, speech disfluencies will be present as well as recognition errors for automated systems. For current attention-based sequence-to-sequence summarization systems, these additional challenges can yield a poor attention distribution over the spoken document words and utterances, impacting performance. In this work, we propose a multi-stage method based on a hierarchical encoder-decoder model to explicitly model utterance-level attention distribution at training time; and enforce diversity at inference time using a unigram diversity term. Furthermore, multitask learning tasks including dialogue act classification and extractive summarization are incorporated. The performance of the system is evaluated on the AMI meeting corpus. The inclusion of both training and inference diversity terms improves performance, outperforming current state-of-the-art systems in terms of ROUGE scores. Additionally, the impact of ASR errors, as well as performance on the multitask learning tasks, is evaluated.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yichi Zhang|AUTHOR Yichi Zhang]]^^1^^, [[Yinpei Dai|AUTHOR Yinpei Dai]]^^1^^, [[Zhijian Ou|AUTHOR Zhijian Ou]]^^1^^, [[Huixin Wang|AUTHOR Huixin Wang]]^^2^^, [[Junlan Feng|AUTHOR Junlan Feng]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^China Mobile, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4253–4257&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, two categories of linguistic knowledge sources, word definitions from monolingual dictionaries and linguistic relations (e.g. synonymy and antonymy), have been leveraged separately to improve the traditional co-occurrence based methods for learning word embeddings. In this paper, we investigate to leverage these two kinds of resources together. Specifically, we propose a new method for word embedding specialization, named Definition Autoencoder with Semantic Injection (DASI). In our experiments¹, DASI outperforms its single-knowledge-source counterparts on two semantic similarity benchmarks, and the improvements are further justified on a downstream task of dialog state tracking. We also show that DASI is superior over simple combinations of existing methods in incorporating the two knowledge sources.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiming Wang|AUTHOR Yiming Wang]]^^1^^, [[Hang Lv|AUTHOR Hang Lv]]^^2^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^3^^, [[Lei Xie|AUTHOR Lei Xie]]^^2^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Northwestern Polytechnical University, China; ^^3^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4258–4262&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Always-on spoken language interfaces, e.g. personal digital assistants, rely on a //wake word// to start processing spoken input. We present novel methods to train a hybrid DNN/HMM wake word detection system from partially labeled training data, and to use it in on-line applications: (i) we remove the prerequisite of frame-level alignments in the LF-MMI training algorithm, permitting the use of un-transcribed training examples that are annotated only for the presence/absence of the wake word; (ii) we show that the classical keyword/filler model must be supplemented with an explicit non-speech (silence) model for good performance; (iii) we present an FST-based decoder to perform online detection. We evaluate our methods on two real data sets, showing 50%–90% reduction in false rejection rates at pre-specified false alarm rates over the best previously published figures, and re-validate them on a third (large) data set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thai Binh Nguyen|AUTHOR Thai Binh Nguyen]], [[Quang Minh Nguyen|AUTHOR Quang Minh Nguyen]], [[Thi Thu Hien Nguyen|AUTHOR Thi Thu Hien Nguyen]], [[Quoc Truong Do|AUTHOR Quoc Truong Do]], [[Chi Mai Luong|AUTHOR Chi Mai Luong]]
</p><p class="cpabstractcardaffiliationlist">VAIS, Vietnam</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4263–4267&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Studies on the Named Entity Recognition (NER) task have shown outstanding results that reach human parity on input texts with correct text formattings, such as with proper punctuation and capitalization. However, such conditions are not available in applications where the input is speech, because the text is generated from a speech recognition system (ASR), and that the system does not consider the text formatting. In this paper, we (1) presented the first Vietnamese speech dataset for NER task, and (2) the first pre-trained public large-scale monolingual language model for Vietnamese that achieved the new state-of-the-art for the Vietnamese NER task by 1.3% absolute F1 score comparing to the latest study. And finally, (3) we proposed a new pipeline for NER task from speech that overcomes the text formatting problem by introducing a text capitalization and punctuation recovery model (CaPu) into the pipeline. The model takes input text from an ASR system and performs two tasks at the same time, producing proper text formatting that helps to improve NER performance. Experimental results indicated that the CaPu model helps to improve by nearly 4% of F1-score.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hemant Yadav|AUTHOR Hemant Yadav]]^^1^^, [[Sreyan Ghosh|AUTHOR Sreyan Ghosh]]^^1^^, [[Yi Yu|AUTHOR Yi Yu]]^^2^^, [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIIT Delhi, India; ^^2^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4268–4272&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Named entity recognition (NER) from text has been a widely studied problem and usually extracts semantic information from text. Until now, NER from speech is mostly studied in a two-step pipeline process that includes first applying an automatic speech recognition (ASR) system on an audio sample and then passing the predicted transcript to a NER tagger. In such cases, the error does not propagate from one step to another as both the tasks are not optimized in an end-to-end (E2E) fashion. Recent studies confirm that integrated approaches (e.g., E2E ASR) outperform sequential ones (e.g., phoneme based ASR). In this paper, we introduce a first publicly available NER annotated dataset for English speech and present an E2E approach, which jointly optimizes the ASR and NER tagger components. Experimental results show that the proposed E2E approach outperforms the classical two-step approach. We also discuss how NER from speech can be used to handle out of vocabulary (OOV) words in an ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joseph P. McKenna|AUTHOR Joseph P. McKenna]], [[Samridhi Choudhary|AUTHOR Samridhi Choudhary]], [[Michael Saxon|AUTHOR Michael Saxon]], [[Grant P. Strimel|AUTHOR Grant P. Strimel]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4273–4277&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end spoken language understanding (SLU) models are a class of model architectures that predict semantics directly from speech. Because of their input and output types, we refer to them as speech-to-interpretation (STI) models. Previous works have successfully applied STI models to targeted use cases, such as recognizing home automation commands, however no study has yet addressed how these models generalize to broader use cases. In this work, we analyze the relationship between the performance of STI models and the difficulty of the use case to which they are applied. We introduce empirical measures of dataset //semantic complexity// to quantify the difficulty of the SLU tasks. We show that near-perfect performance metrics for STI models reported in the literature were obtained with datasets that have low semantic complexity values. We perform experiments where we vary the semantic complexity of a large, proprietary dataset and show that STI model performance correlates with our semantic complexity measures, such that performance increases as complexity values decrease. Our results show that it is important to contextualize an STI model’s performance with the complexity values of its training dataset to reveal the scope of its applicability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Trang Tran|AUTHOR Trang Tran]]^^1^^, [[Morgan Tinkler|AUTHOR Morgan Tinkler]]^^2^^, [[Gary Yeung|AUTHOR Gary Yeung]]^^2^^, [[Abeer Alwan|AUTHOR Abeer Alwan]]^^2^^, [[Mari Ostendorf|AUTHOR Mari Ostendorf]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Washington, USA; ^^2^^University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4278–4282&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Disfluencies are prevalent in spontaneous speech, as shown in many studies of adult speech. Less is understood about children’s speech, especially in pre-school children who are still developing their language skills. We present a novel dataset with annotated disfluencies of spontaneous explanations from 26 children (ages 5–8), interviewed twice over a year-long period. Our preliminary analysis reveals significant differences between children’s speech in our corpus and adult spontaneous speech from two corpora (Switchboard and CallHome). Children have higher disfluency and filler rates, tend to use nasal filled pauses more frequently, and on average exhibit longer reparandums than repairs, in contrast to adult speakers. Despite the differences, an automatic disfluency detection system trained on adult (Switchboard) speech transcripts performs reasonably well on children’s speech, achieving an F1 score that is 10% higher than the score on an adult out-of-domain dataset (CallHome).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ashish Mittal|AUTHOR Ashish Mittal]]^^1^^, [[Samarth Bharadwaj|AUTHOR Samarth Bharadwaj]]^^1^^, [[Shreya Khare|AUTHOR Shreya Khare]]^^1^^, [[Saneem Chemmengath|AUTHOR Saneem Chemmengath]]^^1^^, [[Karthik Sankaranarayanan|AUTHOR Karthik Sankaranarayanan]]^^1^^, [[Brian Kingsbury|AUTHOR Brian Kingsbury]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, India; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4283–4287&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken intent detection has become a popular approach to interface with various smart devices with ease. However, such systems are limited to the preset list of //intents-terms// or //commands//, which restricts the quick customization of personal devices to new intents. This paper presents a few-shot spoken intent classification approach with task-agnostic representations via meta-learning paradigm. Specifically, we leverage the popular representation based meta-learning learning to build a task-agnostic representation of utterances, that then use a linear classifier for prediction. We evaluate three such approaches on our novel experimental protocol developed on two popular spoken intent classification datasets: Google Commands and the Fluent Speech Commands dataset. For a 5-shot (1-shot) classification of novel classes, the proposed framework provides an average classification accuracy of 88.6% (76.3%) on the Google Commands dataset, and 78.5% (64.2%) on the Fluent Speech Commands dataset. The performance is comparable to traditionally supervised classification models with abundant training samples.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tianchi Liu|AUTHOR Tianchi Liu]]^^1^^, [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^2^^, [[Maulik Madhavi|AUTHOR Maulik Madhavi]]^^2^^, [[Shengmei Shen|AUTHOR Shengmei Shen]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Pensees, Singapore; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4293–4297&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we study a novel technique that exploits the interaction between speaker traits and linguistic content to improve both speaker verification and utterance verification performance. We implement an idea of speaker-utterance dual attention (SUDA) in a unified neural network. The dual attention refers to an attention mechanism for the two tasks of speaker and utterance verification. The proposed SUDA features an attention mask mechanism to learn the interaction between the speaker and utterance information streams. This helps to focus only on the required information for respective task by masking the irrelevant counterparts. The studies conducted on RSR2015 corpus confirm that the proposed SUDA outperforms the framework without attention mask as well as several competitive systems for both speaker and utterance verification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lu Yi|AUTHOR Lu Yi]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]]
</p><p class="cpabstractcardaffiliationlist">PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4298–4302&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Typically, speaker verification systems are highly optimized on the speech collected by close-talking microphones. However, these systems will perform poorly when the users use far-field microphones during verification. In this paper, we propose an adversarial separation and adaptation network (ADSAN) to extract speaker discriminative and domain-invariant features through adversarial learning. The idea is based on the notion that speaker embedding comprises domain-specific components and domain-shared components, and that the two components can be disentangled by the interplay of the separation network and the adaptation network in the ADSAN. We also propose to incorporate a mutual information neural estimator into the domain adaptation network to retain speaker discriminative information. Experiments on the VOiCES Challenge 2019 demonstrate that the proposed approaches can produce more domain-invariant and speaker discriminative representations, which could help to reduce the domain shift caused by different types of microphones and reverberant environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hyewon Han|AUTHOR Hyewon Han]], [[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]
</p><p class="cpabstractcardaffiliationlist">Yonsei University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4303–4307&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many approaches can derive information about a single speaker’s identity from the speech by learning to recognize consistent characteristics of acoustic parameters. However, it is challenging to determine identity information when there are multiple concurrent speakers in a given signal. In this paper, we propose a novel deep speaker representation strategy that can reliably extract multiple speaker identities from an overlapped speech. We design a network that can extract a high-level embedding that contains information about each speaker’s identity from a given mixture. Unlike conventional approaches that need reference acoustic features for training, our proposed algorithm only requires the speaker identity labels of the overlapped speech segments. We demonstrate the effectiveness and usefulness of our algorithm in a speaker verification task and a speech separation system conditioned on the target speaker embeddings obtained through the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weiwei Lin|AUTHOR Weiwei Lin]]^^1^^, [[Man-Wai Mak|AUTHOR Man-Wai Mak]]^^1^^, [[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^PolyU, China; ^^2^^National Chiao Tung University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4308–4312&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art speaker verification (SV) systems typically consist of two distinct components: a deep neural network (DNN) for creating speaker embeddings and a backend for improving the embeddings’ discriminative ability. The question which arises is: Can we train an SV system without a backend? We believe that the backend is to compensate for the fact that the network is trained entirely on short speech segments. This paper shows that with several modifications to the x-vector system, DNN embeddings can be directly used for verification. The proposed modifications include: (1) a mask-pooling layer that augments the training samples by randomly masking the frame-level activations and then computing temporal statistics, (2) a sampling scheme that produces diverse training samples by randomly splicing several speech segments from each utterance, and (3) additional convolutional layers designed to reduce the temporal resolution to save computational cost. Experiments on NIST SRE 2016 and 2018 show that our method can achieve state-of-the-art performance with simple cosine similarity and requires only half of the computational cost of the x-vector network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rosa González Hautamäki|AUTHOR Rosa González Hautamäki]], [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]
</p><p class="cpabstractcardaffiliationlist">University of Eastern Finland, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4313–4317&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern automatic speaker verification (ASV) relies heavily on machine learning implemented through deep neural networks. It can be difficult to interpret the output of these black boxes. In line with interpretative machine learning, we model the dependency of ASV detection score upon acoustic mismatch of the enrollment and test utterances. We aim to identify mismatch factors that explain target speaker misses (false rejections). We use distance in the first- and second-order statistics of selected acoustic features as the predictors in a linear mixed effects model, while a standard Kaldi x-vector system forms our ASV black-box. Our results on the VoxCeleb data reveal the most prominent mismatch factor to be in F0 mean, followed by mismatches associated with formant frequencies. Our findings indicate that x-vector systems lack robustness to intra-speaker variations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amber Afshan|AUTHOR Amber Afshan]]^^1^^, [[Jinxi Guo|AUTHOR Jinxi Guo]]^^1^^, [[Soo Jin Park|AUTHOR Soo Jin Park]]^^1^^, [[Vijay Ravi|AUTHOR Vijay Ravi]]^^1^^, [[Alan McCree|AUTHOR Alan McCree]]^^2^^, [[Abeer Alwan|AUTHOR Abeer Alwan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at Los Angeles, USA; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4318–4322&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The effects of speaking-style variability on automatic speaker verification were investigated using the UCLA Speaker Variability database which comprises multiple speaking styles per speaker. An x-vector/PLDA (probabilistic linear discriminant analysis) system was trained with the SRE and Switchboard databases with standard augmentation techniques and evaluated with utterances from the UCLA database. The equal error rate (EER) was low when enrollment and test utterances were of the same style (e.g., 0.98% and 0.57% for read and conversational speech, respectively), but it increased substantially when styles were mismatched between enrollment and test utterances. For instance, when enrolled with conversation utterances, the EER increased to 3.03%, 2.96% and 22.12% when tested on read, narrative, and pet-directed speech, respectively. To reduce the effect of style mismatch, we propose an entropy-based variable frame rate technique to artificially generate style-normalized representations for PLDA adaptation. The proposed system significantly improved performance. In the aforementioned conditions, the EERs improved to 2.69% (conversation – read), 2.27% (conversation – narrative), and 18.75% (pet-directed – read). Overall, the proposed technique performed comparably to multi-style PLDA adaptation without the need for training data in different speaking styles per speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mathieu Seurin|AUTHOR Mathieu Seurin]]^^1^^, [[Florian Strub|AUTHOR Florian Strub]]^^2^^, [[Philippe Preux|AUTHOR Philippe Preux]]^^1^^, [[Olivier Pietquin|AUTHOR Olivier Pietquin]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inria, France; ^^2^^Deepmind, France; ^^3^^Google, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4323–4327&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition is a well known and studied task in the speech processing domain. It has many applications, either for security or speaker adaptation of personal devices. In this paper, we present a new paradigm for automatic speaker recognition that we call Interactive Speaker Recognition (ISR). In this paradigm, the recognition system aims to incrementally build a representation of the speakers by requesting personalized utterances to be spoken in contrast to the standard text-dependent or text-independent schemes. To do so, we cast the speaker recognition task into a sequential decision-making problem that we solve with Reinforcement Learning. Using a standard dataset, we show that our method achieves excellent performance while using little speech signal amounts. This method could also be applied as an utterance selection mechanism for building speech synthesis systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Filip Granqvist|AUTHOR Filip Granqvist]]^^1^^, [[Matt Seigel|AUTHOR Matt Seigel]]^^1^^, [[Rogier van Dalen|AUTHOR Rogier van Dalen]]^^1^^, [[Áine Cahill|AUTHOR Áine Cahill]]^^1^^, [[Stephen Shum|AUTHOR Stephen Shum]]^^2^^, [[Matthias Paulik|AUTHOR Matthias Paulik]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Apple, UK; ^^2^^Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4328–4332&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Information on speaker characteristics can be useful as side information in improving speaker recognition accuracy. However, such information is often private. This paper investigates how privacy-preserving learning can improve a speaker verification system, by enabling the use of privacy-sensitive speaker data to train an auxiliary classification model that predicts vocal characteristics of speakers. In particular, this paper explores the utility achieved by approaches which combine different federated learning and differential privacy mechanisms. These approaches make it possible to train a central model while protecting user privacy, with users’ data remaining on their devices. Furthermore, they make learning on a large population of speakers possible, ensuring good coverage of speaker characteristics when training a model. The auxiliary model described here uses features extracted from phrases which trigger a speaker verification system. From these features, the model predicts speaker characteristic labels considered useful as side information. The knowledge of the auxiliary model is distilled into a speaker verification system using multi-task learning, with the side information labels predicted by this auxiliary model being the additional task. This approach results in a 6% relative improvement in equal error rate over a baseline system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shreyas Ramoji|AUTHOR Shreyas Ramoji]], [[Prashant Krishnan|AUTHOR Prashant Krishnan]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4333–4337&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While deep learning models have made significant advances in supervised classification problems, the application of these models for out-of-set verification tasks like speaker recognition has been limited to deriving feature embeddings. The state-of-the-art x-vector PLDA based speaker verification systems use a generative model based on probabilistic linear discriminant analysis (PLDA) for computing the verification score. Recently, we had proposed a neural network approach for backend modeling in speaker verification called the neural PLDA (NPLDA) where the likelihood ratio score of the generative PLDA model is posed as a discriminative similarity function and the learnable parameters of the score function are optimized using a verification cost. In this paper, we extend this work to achieve joint optimization of the embedding neural network (x-vector network) with the NPLDA network in an end-to-end (E2E) fashion. This proposed end-to-end model is optimized directly from the acoustic features with a verification cost function and during testing, the model directly outputs the likelihood ratio score. With various experiments using the NIST speaker recognition evaluation (SRE) 2018 and 2019 datasets, we show that the proposed E2E model improves significantly over the x-vector PLDA baseline speaker verification system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kuba Łopatka|AUTHOR Kuba Łopatka]]^^1^^, [[Tobias Bocklet|AUTHOR Tobias Bocklet]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Intel, Poland; ^^2^^Intel, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4338–4342&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a new training method to improve HMM-based keyword spotting. The loss function is based on a score computed with the keyword/filler model from the entire input sequence. It is equivalent to max/attention pooling but is based on prior acoustic knowledge. We also employ a multi-task learning setup by predicting both LVCSR and keyword posteriors. We compare our model to a baseline trained on frame-wise cross entropy, with and without per-class weighting. We employ a low-footprint TDNN for acoustic modeling. The proposed training yields significant and consistent improvement over the baseline in adverse noise conditions. The FRR on cafeteria noise is reduced from 13.07% to 5.28% at 9 dB SNR and from 37.44% to 6.78% at 5 dB SNR. We obtain these results with only 600 unique training keyword samples. The training method is independent of the frontend and acoustic model topology.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[S. Shahnawazuddin|AUTHOR S. Shahnawazuddin]]^^1^^, [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]]^^2^^, [[Kunal Kumar|AUTHOR Kunal Kumar]]^^1^^, [[Aayushi Poddar|AUTHOR Aayushi Poddar]]^^1^^, [[Waquar Ahmad|AUTHOR Waquar Ahmad]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NIT Patna, India; ^^2^^University of Crete, Greece; ^^3^^NIT Calicut, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4382–4386&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic recognition of children’s speech is a challenging research problem due to several reasons. One among those is unavailability of large amounts of speech data from child speakers to develop automatic speech recognition (ASR) systems employing deep learning architectures. Using a limited amount of training data limits the power of the learned system. To overcome this issue, we have explored means to effectively make use of adults’ speech data for training an ASR system. For that purpose, generative adversarial network (GAN) based voice conversion (VC) is exploited to modify the acoustic attributes of adults’ speech making it perceptually similar to that of children’s speech. The original and converted speech samples from adult speakers are then pooled together to learn the statistical model parameters. Significantly improved recognition rate for children’s speech is noted due to VC-based data augmentation. To further enhance the recognition rate, a limited amount of children’s speech data is also pooled into training. Large reduction in error rate is observed in this case as well. It is worth mentioning that GAN-based VC does not change the speaking-rate. To demonstrate the need to deal with speaking-rate differences we report the results of time-scale modification of children’s speech test data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andrew Hard|AUTHOR Andrew Hard]], [[Kurt Partridge|AUTHOR Kurt Partridge]], [[Cameron Nguyen|AUTHOR Cameron Nguyen]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Aishanee Shah|AUTHOR Aishanee Shah]], [[Pai Zhu|AUTHOR Pai Zhu]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Rajiv Mathews|AUTHOR Rajiv Mathews]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4343–4347&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We demonstrate that a production-quality keyword-spotting model can be trained on-device using federated learning and achieve comparable false accept and false reject rates to a centrally-trained model. To overcome the algorithmic constraints associated with fitting on-device data (which are inherently non-independent and identically distributed), we conduct thorough empirical studies of optimization algorithms and hyperparameter configurations using large-scale federated simulations. To overcome resource constraints, we replace memory-intensive MTR data augmentation with SpecAugment, which reduces the false reject rate by 56%. Finally, to label examples (given the zero visibility into on-device data), we explore teacher-student training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rongqing Huang|AUTHOR Rongqing Huang]]^^1^^, [[Ossama Abdel-hamid|AUTHOR Ossama Abdel-hamid]]^^2^^, [[Xinwei Li|AUTHOR Xinwei Li]]^^1^^, [[Gunnar Evermann|AUTHOR Gunnar Evermann]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Apple, USA; ^^2^^Apple, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4348–4351&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, all-neural, end-to-end (E2E) ASR systems gained rapid interest in the speech recognition community. They convert speech input to text units in a single trainable Neural Network model. In ASR, many utterances contain rich named entities. Such named entities may be user or location specific and they are not seen during training. A single model makes it inflexible to utilize dynamic contextual information during inference. In this paper, we propose to train a context aware E2E model and allow the beam search to traverse into the context FST during inference. We also propose a simple method to adjust the cost discrepancy between the context FST and the base model. This algorithm is able to reduce the named entity utterance WER by 57% with little accuracy degradation on regular utterances. Although an E2E model does not need a pronunciation dictionary, it’s interesting to make use of existing pronunciation knowledge to improve accuracy. In this paper, we propose an algorithm to map the rare entity words to common words via pronunciation and treat the mapped words as an alternative form to the original word during recognition. This algorithm further reduces the WER on the named entity utterances by another 31%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lasse Borgholt|AUTHOR Lasse Borgholt]]^^1^^, [[Jakob D. Havtorn|AUTHOR Jakob D. Havtorn]]^^2^^, [[Željko Agić|AUTHOR Željko Agić]]^^2^^, [[Anders Søgaard|AUTHOR Anders Søgaard]]^^1^^, [[Lars Maaløe|AUTHOR Lars Maaløe]]^^2^^, [[Christian Igel|AUTHOR Christian Igel]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Copenhagen, Denmark; ^^2^^Corti, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4352–4356&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The two most common paradigms for end-to-end speech recognition are connectionist temporal classification (CTC) and attention-based encoder-decoder (AED) models. It has been argued that the latter is better suited for learning an implicit language model. We test this hypothesis by measuring temporal context sensitivity and evaluate how the models perform when we constrain the amount of contextual information in the audio input. We find that the AED model is indeed more context sensitive, but that the gap can be closed by adding self-attention to the CTC model. Furthermore, the two models perform similarly when contextual information is constrained. Finally, in contrast to previous research, our results show that the CTC model is highly competitive on WSJ and LibriSpeech without the help of an external language model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ankur Kumar|AUTHOR Ankur Kumar]]^^1^^, [[Sachin Singh|AUTHOR Sachin Singh]]^^1^^, [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]]^^2^^, [[Abhinav Garg|AUTHOR Abhinav Garg]]^^2^^, [[Shatrughan Singh|AUTHOR Shatrughan Singh]]^^1^^, [[Chanwoo Kim|AUTHOR Chanwoo Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, India; ^^2^^Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4357–4361&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present techniques to compute confidence score on the predictions made by an end-to-end speech recognition model. Our proposed neural confidence measure (NCM) is trained as a binary classification task to accept or reject an end-to-end speech recognition result. We incorporate features from an encoder, a decoder, and an attention block of the attention-based end-to-end speech recognition model to improve NCM significantly. We observe that using information from multiple beams further improves the performance. As a case study of this NCM, we consider an application of the utterance-level confidence score in a distributed speech recognition environment with two or more speech recognition systems running on different platforms with varying resource capabilities. We show that around 57% computation on a resource-rich high-end platform (e.g. a cloud platform) can be saved without sacrificing accuracy compared to the high-end only solution. Around 70–80% of computations can be saved if we allow a degradation of word error rates to within 5–10% relative to the high-end solution.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huaxin Wu|AUTHOR Huaxin Wu]]^^1^^, [[Genshun Wan|AUTHOR Genshun Wan]]^^2^^, [[Jia Pan|AUTHOR Jia Pan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^iFLYTEK, China; ^^2^^USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4362–4366&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of automatic speech recognition systems can be improved by speaker adaptive training (SAT), which adapts an acoustic model to compensate for the mismatch between training and testing conditions. Speaker code learning is one of the useful ways for speaker adaptive training. It learns a set of speaker dependent codes together with speaker independent acoustic model in order to remove speaker variation. Conventionally, speaker dependent codes and speaker independent acoustic model are jointly optimized. However, this could make it difficult to decouple the speaker code from the acoustic model. In this paper, we take the speaker code based SAT as a meta-learning task. The acoustic model is considered as meta-knowledge, while speaker code is considered as task specific knowledge. Experiments on the Switchboard task show that our method can not only learn a good speaker code, but also improve the performance of the acoustic model even without speaker code.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Han Zhu|AUTHOR Han Zhu]]^^1^^, [[Jiangjiang Zhao|AUTHOR Jiangjiang Zhao]]^^2^^, [[Yuling Ren|AUTHOR Yuling Ren]]^^2^^, [[Li Wang|AUTHOR Li Wang]]^^1^^, [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^China Mobile, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4367–4371&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When only limited target domain data is available, domain adaptation could be used to promote performance of deep neural network (DNN) acoustic model by leveraging well-trained source model and target domain data. However, suffering from domain mismatch and data sparsity, domain adaptation is very challenging. This paper proposes a novel adaptation method for DNN acoustic model using class similarity. Since the output distribution of DNN model contains the knowledge of similarity among classes, which is applicable to both source and target domain, it could be transferred from source to target model for the performance improvement. In our approach, we first compute the frame level posterior probabilities of source samples using source model. Then, for each class, probabilities of this class are used to compute a mean vector, which we refer to as mean soft labels. During adaptation, these mean soft labels are used in a regularization term to train the target model. Experiments showed that our approach outperforms fine-tuning using one-hot labels on both accent and noise adaptation task, especially when source and target domain are highly mismatched.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sashi Novitasari|AUTHOR Sashi Novitasari]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Tomoya Yanagita|AUTHOR Tomoya Yanagita]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4372–4376&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Inspired by a human speech chain mechanism, a machine speech chain framework based on deep learning was recently proposed for the semi-supervised development of automatic speech recognition (ASR) and text-to-speech synthesis (TTS) systems. However, the mechanism to listen while speaking can be done only after receiving entire input sequences. Thus, there is a significant delay when encountering long utterances. By contrast, humans can listen to what they speak in real-time, and if there is a delay in hearing, they won’t be able to continue speaking. In this work, we propose an incremental machine speech chain towards enabling machine to listen while speaking in real-time. Specifically, we construct incremental ASR (ISR) and incremental TTS (ITTS) by letting both systems improve together through a short-term loop. Our experimental results reveal that our proposed framework is able to reduce delays due to long utterances while keeping a comparable performance to the non-incremental basic machine speech chain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tina Raissi|AUTHOR Tina Raissi]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4377–4381&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phoneme-based acoustic modeling of large vocabulary automatic speech recognition takes advantage of phoneme context. The large number of context-dependent (CD) phonemes and their highly varying statistics require tying or smoothing to enable robust training. Usually, Classification and Regression Trees are used for phonetic clustering, which is standard in Hidden Markov Model (HMM)-based systems. However, this solution introduces a secondary training objective and does not allow for end-to-end training. In this work, we address a direct phonetic context modeling for the hybrid Deep Neural Network (DNN)/HMM, that does not build on any phone clustering algorithm for the determination of the HMM state inventory. By performing different decompositions of the joint probability of the center phoneme state and its left and right contexts, we obtain a factorized network consisting of different components, trained jointly. Moreover, the representation of the phonetic context for the network relies on phoneme embeddings. The recognition accuracy of our proposed models on the Switchboard task is comparable and outperforms slightly the hybrid model using the standard state-tying decision trees.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sri Karlapati|AUTHOR Sri Karlapati]], [[Alexis Moinet|AUTHOR Alexis Moinet]], [[Arnaud Joly|AUTHOR Arnaud Joly]], [[Viacheslav Klimkov|AUTHOR Viacheslav Klimkov]], [[Daniel Sáez-Trigueros|AUTHOR Daniel Sáez-Trigueros]], [[Thomas Drugman|AUTHOR Thomas Drugman]]
</p><p class="cpabstractcardaffiliationlist">Amazon, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4387–4391&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prosody Transfer (PT) is a technique that aims to use the prosody from a source audio as a reference while synthesising speech. Fine-grained PT aims at capturing prosodic aspects like rhythm, emphasis, melody, duration, and loudness, from a source audio at a very granular level and transferring them when synthesising speech in a different target speaker’s voice. Current approaches for fine-grained PT suffer from source speaker leakage, where the synthesised speech has the voice identity of the source speaker as opposed to the target speaker. In order to mitigate this issue, they compromise on the quality of PT. In this paper, we propose CopyCat, a novel, many-to-many PT system that is robust to source speaker leakage, without using parallel data. We achieve this through a novel reference encoder architecture capable of capturing temporal prosodic representations which are robust to source speaker leakage. We compare CopyCat against a state-of-the-art fine-grained PT model through various subjective evaluations, where we show a relative improvement of 47% in the quality of prosody transfer and 14% in preserving the target speaker identity, while still maintaining the same naturalness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tuomo Raitio|AUTHOR Tuomo Raitio]], [[Ramya Rasipuram|AUTHOR Ramya Rasipuram]], [[Dan Castellani|AUTHOR Dan Castellani]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4432–4436&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern neural text-to-speech (TTS) synthesis can generate speech that is indistinguishable from natural speech. However, the prosody of generated utterances often represents the average prosodic style of the database instead of having wide prosodic variation. Moreover, the generated prosody is solely defined by the input text, which does not allow for different styles for the same sentence. In this work, we train a sequence-to-sequence neural network conditioned on acoustic speech features to learn a latent prosody space with intuitive and meaningful dimensions. Experiments show that a model conditioned on sentence-wise pitch, pitch range, phone duration, energy, and spectral tilt can effectively control each prosodic dimension and generate a wide variety of speaking styles, while maintaining similar mean opinion score (4.23) to our Tacotron baseline (4.26).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Max Morrison|AUTHOR Max Morrison]]^^1^^, [[Zeyu Jin|AUTHOR Zeyu Jin]]^^2^^, [[Justin Salamon|AUTHOR Justin Salamon]]^^2^^, [[Nicholas J. Bryan|AUTHOR Nicholas J. Bryan]]^^2^^, [[Gautham J. Mysore|AUTHOR Gautham J. Mysore]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern University, USA; ^^2^^Adobe, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4437–4441&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech synthesis has recently seen significant improvements in fidelity, driven by the advent of neural vocoders and neural prosody generators. However, these systems lack intuitive user controls over prosody, making them unable to rectify prosody errors (e.g., misplaced emphases and contextually inappropriate emotions) or generate prosodies with diverse speaker excitement levels and emotions. We address these limitations with a user-controllable, context-aware neural prosody generator. Given a real or synthesized speech recording, our model allows a user to input prosody constraints for certain time frames and generates the remaining time frames from input text and contextual prosody. We also propose a pitch-shifting neural vocoder to modify input speech to match the synthesized prosody. Through objective and subjective evaluations we show that we can successfully incorporate user control into our prosody generation model without sacrificing the overall naturalness of the synthesized speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matt Whitehill|AUTHOR Matt Whitehill]]^^1^^, [[Shuang Ma|AUTHOR Shuang Ma]]^^2^^, [[Daniel McDuff|AUTHOR Daniel McDuff]]^^3^^, [[Yale Song|AUTHOR Yale Song]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Washington, USA; ^^2^^SUNY Buffalo, USA; ^^3^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4442–4446&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current multi-reference style transfer models for Text-to-Speech (TTS) perform sub-optimally on disjoints datasets, where one dataset contains only a single style class for one of the style dimensions. These models generally fail to produce style transfer for the dimension that is underrepresented in the dataset. In this paper, we propose an adversarial cycle consistency training scheme with paired and unpaired triplets to ensure the use of information from all style dimensions. During training, we incorporate //unpaired// triplets with randomly selected reference audio samples and encourage the synthesized speech to preserve the appropriate styles using adversarial cycle consistency. We use this method to transfer emotion from a dataset containing four emotions to a dataset with only a single emotion. This results in a 78% improvement in style transfer (based on emotion classification) with minimal reduction in fidelity and naturalness. In subjective evaluations our method was consistently rated as closer to the reference style than the baseline. Synthesized speech samples are available at: https://sites.google.com/view/adv-cycle-consistent-tts</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Gao|AUTHOR Yang Gao]]^^1^^, [[Weiyi Zheng|AUTHOR Weiyi Zheng]]^^2^^, [[Zhaojun Yang|AUTHOR Zhaojun Yang]]^^2^^, [[Thilo Köhler|AUTHOR Thilo Köhler]]^^2^^, [[Christian Fuegen|AUTHOR Christian Fuegen]]^^2^^, [[Qing He|AUTHOR Qing He]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Carnegie Mellon University, USA; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4447–4451&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While modern TTS technologies have made significant advancements in audio quality, there is still a lack of behavior naturalness compared to conversing with people. We propose a style-embedded TTS system that generates styled responses based on the speech query style. To achieve this, the system includes a style extraction model that extracts a style embedding from the speech query, which is then used by the TTS to produce a matching response. We faced two main challenges: 1) only a small portion of the TTS training dataset has style labels, which is needed to train a multi-style TTS that respects different style embeddings during inference. 2) The TTS system and the style extraction model have disjoint training datasets. We need consistent style labels across these two datasets so that the TTS can learn to respect the labels produced by the style extraction model during inference. To solve these, we adopted a semi-supervised approach that uses the style extraction model to create style labels for the TTS dataset and applied transfer learning to learn the style embedding jointly. Our experiment results show user preference for the styled TTS responses and demonstrate the style-embedded TTS system’s capability of mimicking the speech query style.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Binghuai Lin|AUTHOR Binghuai Lin]]^^1^^, [[Liyuan Wang|AUTHOR Liyuan Wang]]^^1^^, [[Xiaoli Feng|AUTHOR Xiaoli Feng]]^^2^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, China; ^^2^^BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4392–4396&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prosodic event detection plays an important role in spoken language processing tasks and Computer-Assisted Pronunciation Training (CAPT) systems [1]. Traditional methods for the detection of sentence stress and phrase boundaries rely on machine learning methods that model limited contextual information and account little for interaction between these two prosodic events. In this paper, we propose a hierarchical network modeling the contextual factors at the granularity of phoneme, syllable and word based on bidirectional Long Short-Term Memory (BLSTM). Moreover, to account for the inherent connection between sentence stress and phrase boundaries, we perform a joint modeling of these two important prosodic events with a multitask learning framework (MTL) which shares common prosodic features. We evaluate the network performance based on Aix-Machine Readable Spoken English Corpus (Aix-MARSEC). Experimental results show our proposed method obtains the F1-measure of 90% for sentence stress detection and 91% for phrase boundary detection, which outperforms the baseline utilizing conditional random field (CRF) by about 4% and 9% respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ajinkya Kulkarni|AUTHOR Ajinkya Kulkarni]], [[Vincent Colotte|AUTHOR Vincent Colotte]], [[Denis Jouvet|AUTHOR Denis Jouvet]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4397–4401&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel flow metric learning architecture in a parametric multispeaker expressive text-to-speech (TTS) system. We proposed inverse autoregressive flow (IAF) as a way to perform the variational inference, thus providing flexible approximate posterior distribution. The proposed approach condition the text-to-speech system on speaker embeddings so that latent space represents the emotion as semantic characteristics. For representing the speaker, we extracted speaker embeddings from the x-vector based speaker recognition model trained on speech data from many speakers. To predict the vocoder features, we used the acoustic model conditioned on the textual features as well as on the speaker embedding. We transferred the expressivity by using the mean of the latent variables for each emotion to generate expressive speech in different speaker’s voices for which no expressive speech data is available.

We compared the results obtained using flow-based variational inference with variational autoencoder as a baseline model. The performance measured by mean opinion score (MOS), speaker MOS, and expressive MOS shows that N-pair loss based deep metric learning along with IAF model improves the transfer of expressivity in the desired speaker’s voice in synthesized speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jae-Sung Bae|AUTHOR Jae-Sung Bae]], [[Hanbin Bae|AUTHOR Hanbin Bae]], [[Young-Sun Joo|AUTHOR Young-Sun Joo]], [[Junmo Lee|AUTHOR Junmo Lee]], [[Gyeong-Hoon Lee|AUTHOR Gyeong-Hoon Lee]], [[Hoon-Young Cho|AUTHOR Hoon-Young Cho]]
</p><p class="cpabstractcardaffiliationlist">NCSOFT, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4402–4406&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a controllable end-to-end text-to-speech (TTS) system to control the speaking speed (speed-controllable TTS; SCTTS) of synthesized speech with sentence-level speaking-rate value as an additional input. The speaking-rate value, the ratio of the number of input phonemes to the length of input speech, is adopted in the proposed system to control the speaking speed. Furthermore, the proposed SCTTS system can control the speaking speed while retaining other speech attributes, such as the pitch, by adopting the global style token-based style encoder. The proposed SCTTS does not require any additional well-trained model or an external speech database to extract phoneme-level duration information and can be trained in an end-to-end manner. In addition, our listening tests on fast-, normal-, and slow-speed speech showed that the SCTTS can generate more natural speech than other phoneme duration control approaches which increase or decrease duration at the same rate for the entire sentence, especially in the case of slow-speed speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shubhi Tyagi|AUTHOR Shubhi Tyagi]], [[Marco Nicolis|AUTHOR Marco Nicolis]], [[Jonas Rohnke|AUTHOR Jonas Rohnke]], [[Thomas Drugman|AUTHOR Thomas Drugman]], [[Jaime Lorenzo-Trueba|AUTHOR Jaime Lorenzo-Trueba]]
</p><p class="cpabstractcardaffiliationlist">Amazon, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4407–4411&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in Text-to-Speech (TTS) have improved quality and naturalness to near-human capabilities. But something which is still lacking in order to achieve human-like communication is the dynamic variations and adaptability of human speech in more complex scenarios. This work attempts to solve the problem of achieving a more dynamic and natural intonation in TTS systems, particularly for stylistic speech such as the newscaster speaking style. We propose a novel way of exploiting linguistic information in VAE systems to drive dynamic prosody generation. We analyze the contribution of both semantic and syntactic features. Our results show that the approach improves the prosody and naturalness for complex utterances as well as in Long Form Reading (LFR).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tom Kenter|AUTHOR Tom Kenter]], [[Manish Sharma|AUTHOR Manish Sharma]], [[Rob Clark|AUTHOR Rob Clark]]
</p><p class="cpabstractcardaffiliationlist">Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4412–4416&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The prosody of currently available speech synthesis systems can be unnatural due to the systems only having access to the text, possibly enriched by linguistic information such as part-of-speech tags and parse trees. We show that incorporating a BERT model in an RNN-based speech synthesis model — where the BERT model is pretrained on large amounts of unlabeled data, and fine-tuned to the speech domain — improves prosody. Additionally, we propose a way of handling arbitrarily long sequences with BERT. Our findings indicate that small BERT models work better than big ones, and that fine-tuning the BERT part of the model is pivotal for getting good results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Zhao|AUTHOR Yi Zhao]]^^1^^, [[Haoyu Li|AUTHOR Haoyu Li]]^^1^^, [[Cheng-I Lai|AUTHOR Cheng-I Lai]]^^2^^, [[Jennifer Williams|AUTHOR Jennifer Williams]]^^3^^, [[Erica Cooper|AUTHOR Erica Cooper]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NII, Japan; ^^2^^MIT, USA; ^^3^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4417–4421&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Vector Quantized Variational AutoEncoders (VQ-VAE) are a powerful representation learning framework that can discover discrete groups of features from a speech signal without supervision. Until now, the VQ-VAE architecture has previously modeled individual types of speech features, such as only phones or only F0. This paper introduces an important extension to VQ-VAE for learning F0-related suprasegmental information simultaneously along with traditional phone features. The proposed framework uses two encoders such that the F0 trajectory and speech waveform are both input to the system, therefore two separate codebooks are learned. We used a WaveRNN vocoder as the decoder component of VQ-VAE. Our speaker-independent VQ-VAE was trained with raw speech waveforms from multi-speaker Japanese speech databases. Experimental results show that the proposed extension reduces F0 distortion of reconstructed speech for all unseen test speakers, and results in significantly higher preference scores from a listening test. We additionally conducted experiments using single-speaker Mandarin speech to demonstrate advantages of our architecture in another language which relies heavily on F0.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhen Zeng|AUTHOR Zhen Zeng]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4422–4426&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent neural speech synthesis systems have gradually focused on the control of prosody to improve the quality of synthesized speech, but they rarely consider the variability of prosody and the correlation between prosody and semantics together. In this paper, a prosody learning mechanism is proposed to model the prosody of speech based on TTS system, where the prosody information of speech is extracted from the mel-spectrum by a prosody learner and combined with the phoneme sequence to reconstruct the mel-spectrum. Meanwhile, the semantic features of text from the pre-trained language model is introduced to improve the prosody prediction results. In addition, a novel self-attention structure, named as local attention, is proposed to lift this restriction of input text length, where the relative position information of the sequence is modeled by the relative position matrices so that the position encodings is no longer needed. Experiments on English and Mandarin show that speech with more satisfactory prosody has obtained in our model. Especially in Mandarin synthesis, our proposed model outperforms baseline model with a MOS gap of 0.08, and the overall naturalness of the synthesized speech has been significantly improved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuma Shirahata|AUTHOR Yuma Shirahata]], [[Daisuke Saito|AUTHOR Daisuke Saito]], [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4427–4431&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a method of extracting coarse prosodic structure from fundamental frequency (F,,0,,) contours by using a discriminative approach such as deep neural networks (DNN), and applies the method for the parameter estimation of the Fujisaki model. In the conventional methods for the parameter estimation of the Fujisaki model, generative approaches, in which the estimation is treated as an inverse problem of the generation process, have been adopted. On the other hand, recent development of the discriminative approaches would enable us to treat the problem in a direct manner. To introduce a discriminative approach to the parameter estimation of the Fujisaki model in which the precise labels for the parameter are expensive, this study focuses on the similarities between the acoustic realization of the prosodic structure in F,,0,, contours and the sentence structure of the read text. In the proposed method, the sentence structure obtained from the text is utilized as the labels for the discriminative model, and the model estimates the //coarse// prosodic structure. Finally this structure is refined by using a conventional method for the parameter estimation. Experimental results demonstrate that the proposed method improves the estimation accuracy by 18% in terms of detection rate without using any auxiliary features at inference.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Merlin Albes|AUTHOR Merlin Albes]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4546–4550&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In digital health applications, speech offers advantages over other physiological signals, in that it can be easily collected, transmitted, and stored using mobile and Internet of Things (IoT) technologies. However, to take full advantage of this positioning, speech-based machine learning models need to be deployed on devices that can have considerable memory and power constraints. These constraints are particularly apparent when attempting to deploy deep learning models, as they require substantial amounts of memory and data movement operations. Herein, we test the suitability of pruning and quantisation as two methods to compress the overall size of neural networks trained for a health-driven speech classification task. Key results presented on the Upper Respiratory Tract Infection Corpus indicate that pruning, then quantising a network can reduce the number of operational weights by almost 90%. They also demonstrate the overall size of the network can be reduced by almost 95%, as measured in MB, without affecting overall recognition performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Pompili|AUTHOR Anna Pompili]]^^1^^, [[Rubén Solera-Ureña|AUTHOR Rubén Solera-Ureña]]^^1^^, [[Alberto Abad|AUTHOR Alberto Abad]]^^1^^, [[Rita Cardoso|AUTHOR Rita Cardoso]]^^2^^, [[Isabel Guimarães|AUTHOR Isabel Guimarães]]^^3^^, [[Margherita Fabbri|AUTHOR Margherita Fabbri]]^^4^^, [[Isabel P. Martins|AUTHOR Isabel P. Martins]]^^2^^, [[Joaquim Ferreira|AUTHOR Joaquim Ferreira]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^INESC-ID Lisboa, Portugal; ^^2^^Universidade de Lisboa, Portugal; ^^3^^iMM, Portugal; ^^4^^CHU de Toulouse, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4591–4595&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Parkinson’s disease (PD) is a progressive degenerative disorder of the central nervous system characterized by motor and non-motor symptoms. As the disease progresses, patients alternate periods in which motor symptoms are mitigated due to medication intake (ON state) and periods with motor complications (OFF state). The time that patients spend in the OFF condition is currently the main parameter employed to assess pharmacological interventions and to evaluate the efficacy of different active principles. In this work, we present a system that combines automatic speech processing and deep learning techniques to classify the medication state of PD patients by leveraging personal speech-based bio-markers. We devise a speaker-dependent approach and investigate the relevance of different acoustic-prosodic feature sets. Results show an accuracy of 90.54% in a test task with mixed speech and an accuracy of 95.27% in a semi-spontaneous speech task. Overall, the experimental assessment shows the potentials of this approach towards the development of reliable, remote daily monitoring and scheduling of medication intake of PD patients.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nadee Seneviratne|AUTHOR Nadee Seneviratne]]^^1^^, [[James R. Williamson|AUTHOR James R. Williamson]]^^2^^, [[Adam C. Lammert|AUTHOR Adam C. Lammert]]^^3^^, [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]^^4^^, [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Maryland at College Park, USA; ^^2^^MIT Lincoln Laboratory, USA; ^^3^^Worcester Polytechnic Institute, USA; ^^4^^MIT Lincoln Laboratory, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4551–4555&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Changes in speech production that occur as a result of psychomotor slowing, a key feature of Major Depressive Disorder (MDD), are used to non-invasively diagnose MDD. In previous work using data from seven subjects, we showed that using speech-inverted vocal tract variables (TVs) as a direct measure of articulation to quantify changes in the way speech is produced when depressed relative to being not depressed outperforms formant information as a proxy for articulatory information. In this paper, we made significant extensions by using more subjects, taking into account more eigenvalue features and incorporating TVs related to (1) place of articulation and (2) the glottal source. These additions result in a significant improvement in accuracy, particularly for free speech. As a baseline, we perform a similar analysis using higher-dimensional Mel Frequency Cepstral Coefficients (MFCCs).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danai Xezonaki|AUTHOR Danai Xezonaki]]^^1^^, [[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]]^^1^^, [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]^^1^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTUA, Greece; ^^2^^University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4556–4560&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we propose a machine learning model for depression detection from transcribed clinical interviews. Depression is a mental disorder that impacts not only the subject’s mood but also the use of language. To this end we use a Hierarchical Attention Network to classify interviews of depressed subjects. We augment the attention layer of our model with a conditioning mechanism on linguistic features, extracted from affective lexica. Our analysis shows that individuals diagnosed with depression use affective language to a greater extent than not-depressed. Our experiments show that external affective information improves the performance of the proposed architecture in the General Psychotherapy Corpus and the DAIC-WoZ 2017 depression datasets, achieving state-of-the-art 71.6 and 70.3 using the test set, F1-scores respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhaocheng Huang|AUTHOR Zhaocheng Huang]]^^1^^, [[Julien Epps|AUTHOR Julien Epps]]^^1^^, [[Dale Joachim|AUTHOR Dale Joachim]]^^2^^, [[Brian Stasak|AUTHOR Brian Stasak]]^^1^^, [[James R. Williamson|AUTHOR James R. Williamson]]^^3^^, [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UNSW Sydney, Australia; ^^2^^Sonde Health, USA; ^^3^^MIT Lincoln Laboratory, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4561–4565&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Depression disorders are a major growing concern worldwide, especially given the unmet need for widely deployable depression screening for use in real-world environments. Speech-based depression screening technologies have shown promising results, but primarily in systems that are trained using laboratory-based recorded speech. They do not generalize well on data from more naturalistic settings. This paper addresses the generalizability issue by proposing multiple adaptation strategies that update pre-trained models based on a dilated convolutional neural network (CNN) framework, which improve depression detection performance in both clean and naturalistic environments. Experimental results on two depression corpora show that feature representations in CNN layers need to be adapted to accommodate environmental changes, and that increases in data quantity and quality are helpful for pre-training models for adaptation. The cross-corpus adapted systems produce relative improvements of 29.4% and 17.2% in unweighted average recall over non-adapted systems for both clean and naturalistic corpora, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]^^1^^, [[Anita Bagi|AUTHOR Anita Bagi]]^^2^^, [[Szilvia Szalóki|AUTHOR Szilvia Szalóki]]^^2^^, [[István Szendi|AUTHOR István Szendi]]^^2^^, [[Ildikó Hoffmann|AUTHOR Ildikó Hoffmann]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MTA-SZTE RGAI, Hungary; ^^2^^University of Szeged, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4566–4570&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Schizophrenia is a heterogeneous chronic and severe mental disorder. There are several different theories for the development of schizophrenia from an etiological point of view: neurochemical, neuroanatomical, psychological and genetic factors may also be present in the background of the disease. In this study, we examined spontaneous speech productions by patients suffering from schizophrenia (SCH) and bipolar disorder (BD). We extracted 15 temporal parameters from the speech excerpts and used machine learning techniques for distinguishing the SCH and BD groups, their subgroups (SCH-S and SCH-Z) and subtypes (BD-I and BD-II). Our results indicated, that there is a notable difference between spontaneous speech productions of certain subgroups, while some appears to be indistinguishable for the used classification model. Firstly, SCH and BD groups were found to be different. Secondly, the results of SCH-S subgroup were distinct from BD. Thirdly, the spontaneous speech of the SCH-Z subgroup was found to be very similar to the BD-I, however, it was sharply distinct from BD-II. Our detailed examination highlighted the indistinguishable subgroups and led to us to make our S and Z theory more clarified.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mark Huckvale|AUTHOR Mark Huckvale]], [[András Beke|AUTHOR András Beke]], [[Mirei Ikushima|AUTHOR Mirei Ikushima]]
</p><p class="cpabstractcardaffiliationlist">University College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4571–4575&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper looks in more detail at the Interspeech 2019 computational paralinguistics challenge on the prediction of sleepiness ratings from speech. In this challenge, teams were asked to train a regression model to predict sleepiness from samples of the Düsseldorf Sleepy Language Corpus (DSLC). This challenge was notable because the performance of all entrants was uniformly poor, with even the winning system only achieving a correlation of r=0.37. We look at whether the task itself is achievable, and whether the corpus is suited to training a machine learning system for the task. We perform a listening experiment using samples from the corpus and show that a group of human listeners can achieve a correlation of r=0.7 on this task, although this is mainly by classifying the recordings into one of three sleepiness groups. We show that the corpus, because of its construction, confounds variation with sleepiness and variation with speaker identity, and this was the reason that machine learning systems failed to perform well. We conclude that sleepiness rating prediction from voice is not an impossible task, but that good performance requires more information about sleepy speech and its variability across listeners than is available in the DSLC corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kristin J. Teplansky|AUTHOR Kristin J. Teplansky]]^^1^^, [[Alan Wisler|AUTHOR Alan Wisler]]^^1^^, [[Beiming Cao|AUTHOR Beiming Cao]]^^1^^, [[Wendy Liang|AUTHOR Wendy Liang]]^^1^^, [[Chad W. Whited|AUTHOR Chad W. Whited]]^^2^^, [[Ted Mau|AUTHOR Ted Mau]]^^3^^, [[Jun Wang|AUTHOR Jun Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Austin, USA; ^^2^^Austin ENT Clinic, USA; ^^3^^UT Southwestern Medical Center, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4576–4580&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A laryngectomy is the surgical removal of the larynx which results in the loss of phonation. The aim of this study was to characterize tongue and lip movements during speech produced by individuals who have had a laryngectomy. EMA (electromagnetic articulography) was used to derive movement data from the tongue and lips of nine speakers (four alaryngeal and five typical). The kinematic metrics included movement duration, range, speed, and cumulative path distance. We also used a support vector machine (SVM) to classify alaryngeal and healthy speech movement patterns. Our preliminary results indicated that alaryngeal articulation is longer in duration than healthy speakers. Alaryngeal speakers also use larger lateral tongue movements and move the tongue back at a slower speed than healthy speakers. The results from the SVM model also indicates that alaryngeal articulatory movement patterns are distinct from healthy speakers. Taken together, these findings suggest that there are differences in articulatory behavior that occur after the removal of the larynx. It may be helpful to consider the distinct articulatory motion patterns of alaryngeal speech in clinical practice and in the development of technologies (e.g., silent speech interfaces) that assist to provide an intelligible form of speech for this patient population.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhengjun Yue|AUTHOR Zhengjun Yue]], [[Heidi Christensen|AUTHOR Heidi Christensen]], [[Jon Barker|AUTHOR Jon Barker]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4581–4585&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic recognition of dysarthric speech is a very challenging research problem where performances still lag far behind those achieved for typical speech. The main reason is the lack of suitable training data to accommodate for the large mismatch seen between dysarthric and typical speech. Only recently has focus moved from single-word tasks to exploring continuous speech ASR needed for dictation and most voice-enabled interfaces. This paper investigates improvements to dysarthric continuous ASR. In particular, we demonstrate the effectiveness of using unsupervised autoencoder-based bottleneck (AE-BN) feature extractor trained on out-of-domain (OOD) LibriSpeech data. We further explore multi-task optimisation techniques shown to benefit typical speech ASR. We propose a 5-fold cross-training setup on the widely used TORGO dysarthric database. A setup we believe is more suitable for this low-resource data domain. Results show that adding the proposed AE-BN features achieves an average absolute (word error rate) WER improvement of 2.63% compared to the baseline system. A further reduction of 2.33% and 0.65% absolute WER is seen when applying monophone regularisation and joint optimisation techniques, respectively. In general, the ASR system employing monophone regularisation trained on AE-BN features exhibits the best performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jhansi Mallela|AUTHOR Jhansi Mallela]]^^1^^, [[Aravind Illa|AUTHOR Aravind Illa]]^^1^^, [[Yamini Belur|AUTHOR Yamini Belur]]^^2^^, [[Nalini Atchayaram|AUTHOR Nalini Atchayaram]]^^2^^, [[Ravi Yadav|AUTHOR Ravi Yadav]]^^2^^, [[Pradeep Reddy|AUTHOR Pradeep Reddy]]^^2^^, [[Dipanjan Gope|AUTHOR Dipanjan Gope]]^^1^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^NIMHANS, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4586–4590&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Analysis of speech waveform through automated methods in patients with Amyotrophic Lateral Sclerosis (ALS), and Parkinson’s disease (PD) can be used for early diagnosis and monitoring disease progression. Many works in the past have used different acoustic features for the classification of patients with ALS and PD with healthy controls (HC). In this work, we propose a data-driven approach to learn representations from raw speech waveform. Our model comprises of 1-D CNN layer to extract representations from raw speech followed by BLSTM layers for the classification tasks. We consider 3 different classification tasks (ALS vs HC), (PD vs HC), and (ALS vs PD). We perform each classification task using four different speech stimuli in two scenarios: i) trained and tested in a stimulus-specific manner, ii) trained on data pooled from all stimuli, and test on each stimulus separately. Experiments with 60 ALS, 60 PD, and 60 HC show that the frequency responses of the learned 1-D CNN filters are low pass in nature, and the center frequencies lie below 1kHz. The learned representations form raw speech perform better than MFCC which is considered as baseline. Experiments with pooled models yield a better result compared to the task-specific models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yangyang Shi|AUTHOR Yangyang Shi]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Chunyang Wu|AUTHOR Chunyang Wu]], [[Christian Fuegen|AUTHOR Christian Fuegen]], [[Frank Zhang|AUTHOR Frank Zhang]], [[Duc Le|AUTHOR Duc Le]], [[Ching-Feng Yeh|AUTHOR Ching-Feng Yeh]], [[Michael L. Seltzer|AUTHOR Michael L. Seltzer]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4996–5000&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformers, originally proposed for natural language processing (NLP) tasks, have recently achieved great success in automatic speech recognition (ASR). However, adjacent acoustic units (i.e., frames) are highly correlated, and long-distance dependencies between them are weak, unlike text units. It suggests that ASR will likely benefit from sparse and localized attention. In this paper, we propose Weak-Attention Suppression (WAS), a method that dynamically induces sparsity in attention probabilities. We demonstrate that WAS leads to consistent Word Error Rate (WER) improvement over strong transformer baselines. On the widely used LibriSpeech benchmark, our proposed method reduced WER by 10% on test-clean and 5% on test-other for streamable transformers, resulting in a new state-of-the-art among streaming models. Further analysis shows that WAS learns to suppress attention of non-critical and redundant continuous acoustic frames, and is more likely to suppress past frames rather than future ones. It indicates the importance of lookahead in attention-based ASR models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liang Lu|AUTHOR Liang Lu]], [[Changliang Liu|AUTHOR Changliang Liu]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5041–5045&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While recurrent neural networks still largely define state-of-the-art speech recognition systems, the Transformer network has been proven to be a competitive alternative, especially in the offline condition. Most studies with Transformers have been constrained in a relatively small scale setting, and some forms of data argumentation approaches are usually applied to combat the data sparsity issue. In this paper, we aim at understanding the behaviors of Transformers in the large-scale speech recognition setting, where we have used around 65,000 hours of training data. We investigated various aspects on scaling up Transformers, including model initialization, warmup training as well as different Layer Normalization strategies. In the streaming condition, we compared the widely used attention mask based future context lookahead approach to the Transformer-XL network. From our experiments, we show that Transformers can achieve around 6% relative word error rate (WER) reduction compared to the BLSTM baseline in the offline fashion, while in the streaming fashion, Transformer-XL is comparable to LC-BLSTM with 800 millisecond latency constraint.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenyong Huang|AUTHOR Wenyong Huang]], [[Wenchao Hu|AUTHOR Wenchao Hu]], [[Yu Ting Yeung|AUTHOR Yu Ting Yeung]], [[Xiao Chen|AUTHOR Xiao Chen]]
</p><p class="cpabstractcardaffiliationlist">Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5001–5005&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer has achieved competitive performance against state-of-the-art end-to-end models in automatic speech recognition (ASR), and requires significantly less training time than RNN-based models. The original Transformer, with encoder-decoder architecture, is only suitable for offline ASR. It relies on an attention mechanism to learn alignments, and encodes input audio bidirectionally. The high computation cost of Transformer decoding also limits its use in production streaming systems. To make Transformer suitable for streaming ASR, we explore Transducer framework as a streamable way to learn alignments. For audio encoding, we apply unidirectional Transformer with interleaved convolution layers. The interleaved convolution layers are used for modeling future context which is important to performance. To reduce computation cost, we gradually downsample acoustic input, also with the interleaved convolution layers. Moreover, we limit the length of history context in self-attention to maintain constant computation cost for each decoding step. We show that this architecture, named Conv-Transformer Transducer, achieves competitive performance on LibriSpeech dataset (3.6% WER on test-clean) without external language models. The performance is comparable to previously published streamable Transformer Transducer and strong hybrid streaming ASR systems, and is achieved with smaller look-ahead window (140 ms), fewer parameters and lower frame rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Song Li|AUTHOR Song Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]], [[Lingling Liu|AUTHOR Lingling Liu]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5006–5010&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, the Transformer-based end-to-end speech recognition system has become a state-of-the-art technology. However, one prominent problem with current end-to-end speech recognition systems is that an extensive amount of paired data are required to achieve better recognition performance. In order to grapple with such an issue, we propose two unsupervised pre-training strategies for the encoder and the decoder of Transformer respectively, which make full use of unpaired data for training. In addition, we propose a new semi-supervised fine-tuning method named multi-task semantic knowledge learning to strengthen the Transformer’s ability to learn about semantic knowledge, thereby improving the system performance. We achieve the best CER with our proposed methods on AISHELL-1 test set: 5.9%, which exceeds the best end-to-end model by 10.6% relative CER. Moreover, relative CER reduction of 20.3% and 17.8% are obtained for low-resource Mandarin and English data sets, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takaaki Hori|AUTHOR Takaaki Hori]], [[Niko Moritz|AUTHOR Niko Moritz]], [[Chiori Hori|AUTHOR Chiori Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]
</p><p class="cpabstractcardaffiliationlist">MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5011–5015&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an approach to long-context end-to-end automatic speech recognition (ASR) using Transformers, aiming at improving ASR accuracy for long audio recordings such as lecture and conversational speeches. Most end-to-end ASR systems are basically designed to recognize independent utterances, but contextual information (e.g., speaker or topic) over multiple utterances is known to be useful for ASR. There are some prior studies on RNN-based models that utilize such contextual information, but very few on Transformers, which are becoming more popular in end-to-end ASR. In this paper, we propose a Transformer-based architecture that accepts multiple consecutive utterances at the same time and predicts an output sequence for the last utterance. This is repeated in a sliding-window fashion with one-utterance shifts to recognize the entire recording. Based on this framework, we also investigate how to design the context window and train the model effectively in monologue (one speaker) and dialogue (two speakers) scenarios. We demonstrate the effectiveness of our approach using monologue benchmarks on CSJ and TED-LIUM3 and dialogue benchmarks on SWITCHBOARD and HKUST, showing significant error reduction from single-utterance ASR baselines with or without speaker i-vectors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinyuan Zhou|AUTHOR Xinyuan Zhou]]^^1^^, [[Grandee Lee|AUTHOR Grandee Lee]]^^2^^, [[Emre Yılmaz|AUTHOR Emre Yılmaz]]^^2^^, [[Yanhua Long|AUTHOR Yanhua Long]]^^1^^, [[Jiaen Liang|AUTHOR Jiaen Liang]]^^3^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SHNU, China; ^^2^^NUS, Singapore; ^^3^^Unisound, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5016–5020&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer has shown impressive performance in automatic speech recognition. It uses an encoder-decoder structure with self-attention to learn the relationship between high-level representation of source inputs and embedding of target outputs. In this paper, we propose a novel decoder structure that features a self-and-mixed attention decoder (SMAD) with a deep acoustic structure (DAS) to improve the acoustic representation of Transformer-based LVCSR. Specifically, we introduce a self-attention mechanism to learn a multi-layer deep acoustic structure for multiple levels of acoustic abstraction. We also design a mixed attention mechanism that learns the alignment between different levels of acoustic abstraction and its corresponding linguistic information simultaneously in a shared embedding space. The ASR experiments on Aishell-1 show that the proposed structure achieves CERs of 4.8% on the dev set and 5.1% on the test set, which are the best reported results on this task to the best of our knowledge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]]^^1^^, [[Chongjia Ni|AUTHOR Chongjia Ni]]^^2^^, [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]]^^2^^, [[Shafiq Joty|AUTHOR Shafiq Joty]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Bin Ma|AUTHOR Bin Ma]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5021–5025&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer model has made great progress in speech recognition. However, compared with models with iterative computation, transformer model has fixed encoder and decoder depth, thus losing the recurrent inductive bias. Besides, finding the optimal number of layers involves trial-and-error attempts. In this paper, the universal speech transformer is proposed, which to the best of our knowledge, is the first work to use universal transformer for speech recognition. It generalizes the speech transformer with dynamic numbers of encoder/decoder layers, which can relieve the burden of tuning depth related hyperparameters. Universal transformer adds the depth and positional embeddings repeatedly for each layer, which dilutes the acoustic information carried by hidden representation, and it also performs a partial update of hidden vectors between layers, which is less efficient especially on the very deep models. For better use of universal transformer, we modify its processing framework by removing the depth embedding and only adding the positional embedding once at transformer encoder frontend. Furthermore, to update the hidden vectors efficiently, especially on the very deep models, we adopt a full update. Experiments on LibriSpeech, Switchboard and AISHELL-1 datasets show that our model outperforms a baseline by 3.88%–13.7%, and surpasses other model with less computation cost.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ye Bai|AUTHOR Ye Bai]], [[Shuai Zhang|AUTHOR Shuai Zhang]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5026–5030&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Non-autoregressive transformer models have achieved extremely fast inference speed and comparable performance with autoregressive sequence-to-sequence models in neural machine translation. Most of the non-autoregressive transformers decode the target sequence from a predefined-length mask sequence. If the predefined length is too long, it will cause a lot of redundant calculations. If the predefined length is shorter than the length of the target sequence, it will hurt the performance of the model. To address this problem and improve the inference speed, we propose a spike-triggered non-autoregressive transformer model for end-to-end speech recognition, which introduces a CTC module to predict the length of the target sequence and accelerate the convergence. All the experiments are conducted on a public Chinese mandarin dataset AISHELL-1. The results show that the proposed model can accurately predict the length of the target sequence and achieve a competitive performance with the advanced transformers. What’s more, the model even achieves a real-time factor of 0.0056, which exceeds all mainstream speech recognition models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]]^^1^^, [[Chongjia Ni|AUTHOR Chongjia Ni]]^^2^^, [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]]^^2^^, [[Shafiq Joty|AUTHOR Shafiq Joty]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Bin Ma|AUTHOR Bin Ma]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5031–5035&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer, a state-of-the-art neural network architecture, has been used successfully for different sequence-to-sequence transformation tasks. This model architecture disperses the attention distribution over entire input to learn long-term dependencies, which is important for some sequence-to-sequence tasks, such as neural machine translation and text summarization. However, automatic speech recognition (ASR) has a characteristic to have monotonic alignment between text output and speech input. Techniques like Connectionist Temporal Classification (CTC), RNN Transducer (RNN-T) and Recurrent Neural Aligner (RNA) build on top of this monotonic alignment and use local encoded speech representations for corresponding token prediction. In this paper, we present an effective cross attention biasing technique in transformer that takes monotonic alignment between text output and speech input into consideration by making use of cross attention weights. Specifically, a Gaussian mask is applied on cross attention weights to limit the input speech context range locally given alignment information. We further introduce a regularizer for alignment regularization. Experiments on LibriSpeech dataset find that our proposed model can obtain improved output-input alignment for ASR, and yields 14.5%–25.0% relative word error rate (WER) reductions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anmol Gulati|AUTHOR Anmol Gulati]], [[James Qin|AUTHOR James Qin]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Niki Parmar|AUTHOR Niki Parmar]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Jiahui Yu|AUTHOR Jiahui Yu]], [[Wei Han|AUTHOR Wei Han]], [[Shibo Wang|AUTHOR Shibo Wang]], [[Zhengdong Zhang|AUTHOR Zhengdong Zhang]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ruoming Pang|AUTHOR Ruoming Pang]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5036–5040&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently Transformer and Convolution neural network (CNN) based models have shown promising results in Automatic Speech Recognition (ASR), outperforming Recurrent neural networks (RNNs). Transformer models are good at capturing content-based global interactions, while CNNs exploit local features effectively. In this work, we achieve the best of both worlds by studying how to combine convolution neural networks and transformers to model both local and global dependencies of an audio sequence in a parameter-efficient way. To this regard, we propose the convolution-augmented transformer for speech recognition, named //Conformer//. //Conformer// significantly outperforms the previous Transformer and CNN based models achieving state-of-the-art accuracies. On the widely used LibriSpeech benchmark, our model achieves WER of 2.1%/4.3% without using a language model and 1.9%/3.9% with an external language model on test/testother. We also observe competitive performance of 2.7%/6.3% with a small model of only 10M parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masahito Togami|AUTHOR Masahito Togami]], [[Robin Scheibler|AUTHOR Robin Scheibler]]
</p><p class="cpabstractcardaffiliationlist">LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5046–5050&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a direction-of-arrival (DOA) estimation technique which assumes that speech sources are sufficiently sparse and there is only one active speech source at each time-frequency (T-F) point. The proposed method estimates the DOA of the active speech source at each T-F point. A typical way for DOA estimation is based on grid-searching for all possible directions. However, computational cost of grid-searching is proportional to the resolution of search area. Instead of accurate grid-searching, the proposed method adopts rough grid-searching followed by an iterative parameter optimization based on Majorization-Minimization (MM) algorithm. We propose a parameter optimization method which guarantees a monotonical increase of the objective function. Experimental results show that the proposed method estimates DOAs of speech sources more accurately than conventional DOA estimation methods when computational cost of each method is almost the same.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Xue|AUTHOR Wei Xue]], [[Ying Tong|AUTHOR Ying Tong]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]]
</p><p class="cpabstractcardaffiliationlist">JD.com, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>

</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5091–5095&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of sound event localization and detection (SELD) degrades in source-overlapping cases since features of different sources collapse with each other, and the network tends to fail to learn to separate these features effectively. In this paper, by leveraging the conventional microphone array signal processing to generate comprehensive representations for SELD, we propose a new SELD method based on multiple direction of arrival (DOA) beamforming and multi-task learning. By using multiple beamformers to extract the signals from different DOAs, the sound field is more diversely described, and specialised representations of target source and noises can be obtained. With labelled training data, the steering vector is estimated based on the cross-power spectra (CPS) and the signal presence probability (SPP), which eliminates the need of knowing the array geometry. We design two networks for sound event localization (SED) and sound source localization (SSL) and use a multi-task learning scheme for SED, in which the SSL-related task act as a regularization. Experimental results using the database of DCASE2019 SELD task show that the proposed method achieves the state-of-art performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoli Zhong|AUTHOR Xiaoli Zhong]]^^1^^, [[Hao Song|AUTHOR Hao Song]]^^2^^, [[Xuejie Liu|AUTHOR Xuejie Liu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SCUT, China; ^^2^^GDUT, China; ^^3^^SCNU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5051–5055&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In virtual auditory display, the accurate simulation of early reflection is helpful to guarantee audio fidelity and enhance immersion. However, the early reflection may not be easily distinguished from the direct sound due to the masking effect. This work investigated the spatial resolution of early reflection for speech and white noise under different conditions, in which three-down-one-up adaptive strategy with three-interval-three-alternative forced-choice (3I-3AFC) was employed. Results show that, for both speech and white noise, the spatial resolution of early reflection decreases with the increasing deviation of reflection orientation relative to the direct sound, and has no relationship with the time delay; Moreover, the spatial resolution of early reflection for speech is always lower than that for white noise under the same condition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aditya Raikar|AUTHOR Aditya Raikar]]^^1^^, [[Karan Nathwani|AUTHOR Karan Nathwani]]^^2^^, [[Ashish Panda|AUTHOR Ashish Panda]]^^1^^, [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^TCS Innovation Labs Mumbai, India; ^^2^^IIT Jammu, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5056–5060&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Room Impulse Response (RIR) measurement is done using a microphone and loudspeaker pair and is prone to error due to error in measurement of the microphone position. In literature, the adverse impact of ambient noise on RIR measurement is mostly explored. However, the impact of microphone position error on full, early and late RIR measurements have never been explored. In this paper, we investigate the error in RIR introduced due to error in measurement of the microphone position. We also study the impact of this on the quality and intelligibility of speech. Our analysis shows that the impact of error in microphone position measurement on RIR is as adverse as that of the ambient noise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuwen Deng|AUTHOR Shuwen Deng]], [[Wolfgang Mack|AUTHOR Wolfgang Mack]], [[Emanuël A.P. Habets|AUTHOR Emanuël A.P. Habets]]
</p><p class="cpabstractcardaffiliationlist">AudioLabs, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5061–5065&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The reverberation time, T,,60,,, is an important acoustic parameter in speech and acoustic signal processing. Often, the T,,60,, is unknown and blind estimation from a single-channel measurement is required. State-of-the-art T,,60,, estimation is achieved by a convolutional neural network (CNN) which maps a feature representation of the speech to the T,,60,,. The temporal input length of the CNN is fixed. Time-varying scenarios, e.g., robot audition, require continuous T,,60,, estimation in an online fashion, which is computationally heavy using the CNN. We propose to use a convolutional recurrent neural network (CRNN) for blind T,,60,, estimation as it combines the parametric efficiency of CNNs with the online estimation of recurrent neural networks and, in contrast to CNNs, can process time-sequences of variable length. We evaluated the proposed CRNN on the //Acoustic Characterization of Environments Challenge// dataset for different input lengths. Our proposed method outperforms the state-of-the-art CNN approach even for shorter inputs at the cost of more trainable parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wolfgang Mack|AUTHOR Wolfgang Mack]], [[Shuwen Deng|AUTHOR Shuwen Deng]], [[Emanuël A.P. Habets|AUTHOR Emanuël A.P. Habets]]
</p><p class="cpabstractcardaffiliationlist">AudioLabs, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5066–5070&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic parameters, like the direct-to-reverberation ratio (DRR), can be used in audio processing algorithms to perform, e.g., dereverberation or in audio augmented reality. Often, the DRR is not available and has to be estimated blindly from recorded audio signals. State-of-the-art DRR estimation is achieved by deep neural networks (DNNs), which directly map a feature representation of the acquired signals to the DRR. Motivated by the equality of the signal-to-reverberation ratio and the (channel-based) DRR under certain conditions, we formulate single-channel DRR estimation as an extraction task of two signal components from the recorded audio. The DRR can be obtained by inserting the estimated signals in the definition of the DRR. The extraction is performed using time-frequency masks. The masks are estimated by a DNN trained end-to-end to minimize the mean-squared error between the estimated and the oracle DRR. We conduct experiments with different pre-processing and mask estimation schemes. The proposed method outperforms state-of-the-art single- and multi-channel methods on the ACE challenge data corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hanan Beit-On|AUTHOR Hanan Beit-On]]^^1^^, [[Vladimir Tourbabin|AUTHOR Vladimir Tourbabin]]^^2^^, [[Boaz Rafaely|AUTHOR Boaz Rafaely]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BGU, Israel; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5071–5075&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A common approach to overcoming the effect of reverberation in speaker localization is to identify the time-frequency (TF) bins in which the direct path is dominant, and then to use only these bins for estimation. Various direct-path dominance (DPD) tests have been proposed for identifying the direct-path bins. However, for a two-microphone binaural array, tests that do not employ averaging over TF bins seem to fail. In this paper, this anomaly is studied by comparing two DPD tests, in which only one has been designed to employ averaging over TF bins. An analysis of these tests shows that, in the binaural case, a TF bin that is dominated by multiple reflections may be similar to a bin with a single source. This insight can explain the high false alarm rate encountered with tests that do not employ averaging. Also, it is shown that incorporating averaging over TF bins can reduce the false alarm rate. A simulation study is presented that verifies the importance of TF averaging for a reliable selection of direct-path bins in the binaural case.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yonggang Hu|AUTHOR Yonggang Hu]], [[Prasanga N. Samarasinghe|AUTHOR Prasanga N. Samarasinghe]], [[Thushara D. Abhayapala|AUTHOR Thushara D. Abhayapala]]
</p><p class="cpabstractcardaffiliationlist">Australian National University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5076–5080&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Over recent years, spatial acoustic signal processing using higher order microphone arrays in the spherical harmonics domain has been a popular research topic. This paper uses a recently introduced source feature called the //relative harmonic coefficients// to develop an acoustic signal enhancement approach in noisy environments. This proposed method enables to extract the clean spherical harmonic coefficients from noisy higher order microphone recordings. Hence, this technique can be used as a pre-processing tool for noise-free measurements required by many spatial audio applications. We finally present a simulation study analyzing the performance of this approach in far field noisy environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[B.H.V.S. Narayana Murthy|AUTHOR B.H.V.S. Narayana Murthy]]^^1^^, [[J.V. Satyanarayana|AUTHOR J.V. Satyanarayana]]^^2^^, [[Nivedita Chennupati|AUTHOR Nivedita Chennupati]]^^1^^, [[B. Yegnanarayana|AUTHOR B. Yegnanarayana]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIIT Hyderabad, India; ^^2^^Research Centre Imarat, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5081–5085&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a method of obtaining the instantaneous time delay of broadband signals collected at two spatially separated microphones in a live room. The method is based on using the complex signals at the output of single frequency filtering (SFF) of the microphone signals. We show that the complex SFF spectrum at each instant can be used to obtain the instantaneous time delay (TD). By using only the phase of the SFF spectrum, it is possible to get a better estimate of the TD, as in the case of the standard GCC-PHAT method. We show the effectiveness of the proposed method for real microphone signals collected in a live room. Robustness of the method is tested for additive babble noise at 0 dB for the live microphone data. Since we get the TD at every sampling instant, it may be possible to exploit this feature for two-channel multi-speaker separation and for tracking a moving speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Wang|AUTHOR Hao Wang]], [[Kai Chen|AUTHOR Kai Chen]], [[Jing Lu|AUTHOR Jing Lu]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 5086–5090&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It has been noted that the identification of the time-frequency bins dominated by the contribution from the direct propagation of the target speaker can significantly improve the robustness of the direction-of-arrival estimation. However, the correct extraction of the direct-path sound is challenging especially in adverse environments. In this paper, a U-net based direct-path dominance test method is proposed. Exploiting the efficient segmentation capability of the U-net architecture, the direct-path information can be effectively retrieved from a dedicated multi-task neural network. Moreover, the training and inference of the neural network only need the input of a single microphone, circumventing the problem of array-structure dependence faced by common end-to-end deep learning based methods. Simulations demonstrate that significantly higher estimation accuracy can be achieved in high reverberant and low signal-to-noise ratio environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chao Zhang|AUTHOR Chao Zhang]]^^1^^, [[Junjie Cheng|AUTHOR Junjie Cheng]]^^1^^, [[Yanmei Gu|AUTHOR Yanmei Gu]]^^1^^, [[Huacan Wang|AUTHOR Huacan Wang]]^^1^^, [[Jun Ma|AUTHOR Jun Ma]]^^1^^, [[Shaojun Wang|AUTHOR Shaojun Wang]]^^2^^, [[Jing Xiao|AUTHOR Jing Xiao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ping An Technology, China; ^^2^^Ping An Technology, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4596–4600&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we describe a novel replay detection system for the ASVspoof 2019 challenge. The objective of this challenge is to distinguish arbitrarily audio files from bona fide or spoofing attacks, where spoofing attacking includes replay attacks, text-to-speech and voice conversions. Our replay detection system is a pipeline system with three aspects: feature engineering, DNN models, and score fusion. Firstly, logspec is extracted as input features according to previous research works where spectrum augmentation is applied during training stage to boost performance under limited training data. Secondly, DNN models part includes three major models: SEnet, DenseNet, and our proposed model, channel consistency DenseNeXt, where binary cross entropy loss and center loss are applied as training objectives. Finally, score fusion is applied to all three DNN models in order to obtain primary system results. The experiment results show that for our best single system, channel consistency DenseNeXt, t-DCF and EER are 0.0137 and 0.46% on physical access evaluation set respectively. The performance of primary system obtains 0.00785 and 0.282% in terms of t-DCF and EER respectively. This is a 96.8% improvement compared to the baseline system CQCC-GMM and it achieves state-of-the-art performance in PA challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michał Kośmider|AUTHOR Michał Kośmider]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4641–4645&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine learning algorithms, when trained on audio recordings from a limited set of devices, may not generalize well to samples recorded using other devices with different frequency responses. In this work, a relatively straightforward method is introduced to address this problem. Two variants of the approach are presented. First requires aligned examples from multiple devices, the second approach alleviates this requirement. This method works for both time and frequency domain representations of audio recordings. Further, a relation to standardization and Cepstral Mean Subtraction is analysed. The proposed approach becomes effective even when very few examples are provided. This method was developed during the //Detection and Classification of Acoustic Scenes and Events// (DCASE) 2019 challenge and won the 1st place in the scenario with mismatched recording devices with the accuracy of 75%. Source code for the experiments can be found online.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Przemyslaw Falkowski-Gilski|AUTHOR Przemyslaw Falkowski-Gilski]]^^1^^, [[Grzegorz Debita|AUTHOR Grzegorz Debita]]^^2^^, [[Marcin Habrych|AUTHOR Marcin Habrych]]^^3^^, [[Bogdan Miedzinski|AUTHOR Bogdan Miedzinski]]^^3^^, [[Przemyslaw Jedlikowski|AUTHOR Przemyslaw Jedlikowski]]^^3^^, [[Bartosz Polnik|AUTHOR Bartosz Polnik]]^^4^^, [[Jan Wandzio|AUTHOR Jan Wandzio]]^^5^^, [[Xin Wang|AUTHOR Xin Wang]]^^6^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Gdansk University of Technology, Poland; ^^2^^General Tadeusz Kościuszko Military University of Land Forces, Poland; ^^3^^Wrocław University of Science & Technology, Poland; ^^4^^KOMAG Institute of Mining Technology, Poland; ^^5^^KGHM Polska Miedź, Poland; ^^6^^China Agriculture University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4601–4605&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The broadband over power line – power line communication (BPL-PLC) cable is resistant to electricity stoppage and partial damage of phase conductors. It maintains continuity of transmission in case of an emergency. These features make it an ideal solution for delivering data, e.g. in an underground mine environment, especially clear and easily understandable voice messages. This paper describes a subjective quality evaluation of such a system. The solution was designed and tested in real-time operating conditions. It consists of a one-way transmission system, dedicated to delivering speech signals and voice commands. The study involved signal samples in three languages: English, German, and Polish, processed at different bitrates: 8, 16, and 24 kbps. Obtained results confirmed the usefulness of BPL-PLC technology for speech transmission purposes. Even in a narrowband scenario, with bitrates smaller than 1 Mbps, it proved to be a potentially life-saving communication system. Results of this study may aid researchers and parties from the mining and oil industry, as well as professionals involved in rescue operations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Waito Chiu|AUTHOR Waito Chiu]]^^1^^, [[Yan Xu|AUTHOR Yan Xu]]^^1^^, [[Andrew Abel|AUTHOR Andrew Abel]]^^1^^, [[Chun Lin|AUTHOR Chun Lin]]^^2^^, [[Zhengzheng Tu|AUTHOR Zhengzheng Tu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^XJTLU, China; ^^2^^Anhui University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4606–4610&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Lombard Effect shows that speakers increase their vocal effort in the presence of noise, and research into acoustic speech, has demonstrated varying effects, depending on the noise level and speaker, with several differences, including timing and vocal effort. Research also identified several differences, including between gender, and noise type. However, most research has focused on the audio domain, with very limited focus on the visual effect. This paper presents a detailed study of the visual Lombard Effect, using a pilot Lombard Speech corpus developed for our needs, and a recently developed Gabor based lip feature extraction approach. Using Kernel Density Estimation, we identify clear differences between genders, and also show that speakers handle different noise types differently.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4611–4615&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many applications of speech technology require more and more audio data. Automatic assessment of the quality of the collected recordings is important to ensure they meet the requirements of the related applications. However, effective and high performing assessment remains a challenging task without a clean reference. In this paper, a novel model for audio quality assessment is proposed by jointly using bidirectional long short-term memory and an attention mechanism. The former is to mimic a human auditory perception ability to learn information from a recording, and the latter is to further discriminate interferences from desired signals by highlighting target related features. To evaluate our proposed approach, the TIMIT dataset is used and augmented by mixing with various natural sounds. In our experiments, two tasks are explored. The first task is to predict an utterance quality score, and the second is to identify where an anomalous distortion takes place in a recording. The obtained results show that the use of our proposed approach outperforms a strong baseline method and gains about 5% improvements after being measured by three metrics, Linear Correlation Coefficient and Spearman’s Rank Correlation Coefficient, and F1.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alessandro Ragano|AUTHOR Alessandro Ragano]]^^1^^, [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]^^2^^, [[Andrew Hines|AUTHOR Andrew Hines]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College Dublin, Ireland; ^^2^^Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4616–4620&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Objective audio quality assessment is preferred to avoid time-consuming and costly listening tests. The development of objective quality metrics depends on the availability of datasets appropriate to the application under study. Currently, a suitable human-annotated dataset for developing quality metrics in archive audio is missing. Given the online availability of archival recordings, we propose to develop a real-world audio quality dataset. We present a methodology used to curate a speech quality database using the archive recordings from the Apollo Space Program. The proposed procedure is based on two steps: a pilot listening test and an exploratory data analysis. The pilot listening test shows that we can extract audio clips through the control of speech-to-text performance metrics to prevent data repetition. Through unsupervised exploratory data analysis, we explore the characteristics of the degradations. We classify distinct degradations and we study spectral, intensity, tonality and overall quality properties of the data through clustering techniques. These results provide the necessary foundation to support the subsequent development of large-scale crowdsourced datasets for audio quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Robin Algayres|AUTHOR Robin Algayres]]^^1^^, [[Mohamed Salah Zaiem|AUTHOR Mohamed Salah Zaiem]]^^1^^, [[Beno^ıt Sagot|AUTHOR Beno^ıt Sagot]]^^2^^, [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LSCP (UMR 8554), France; ^^2^^Inria, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4621–4625&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech embeddings are fixed-size acoustic representations of variable-length speech sequences. They are increasingly used for a variety of tasks ranging from information retrieval to unsupervised term discovery and speech segmentation. However, there is currently no clear methodology to compare or optimize the quality of these embeddings in a task-neutral way. Here, we systematically compare two popular metrics, ABX discrimination and Mean Average Precision (MAP), on 5 languages across 17 embedding methods, ranging from supervised to fully unsupervised, and using different loss functions (autoencoders, correspondance autoencoders, siamese). Then we use the ABX and MAP to predict performances on a new downstream task: the unsupervised estimation of the frequencies of speech segments in a given corpus. We find that overall, ABX and MAP correlate with one another and with frequency estimation. However, substantial discrepancies appear in the fine-grained distinctions across languages and/or embedding methods. This makes it unrealistic at present to propose a task-independent silver bullet method for computing the intrinsic quality of speech embeddings. There is a need for more detailed analysis of the metrics currently used to evaluate such embeddings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Li|AUTHOR Hao Li]]^^1^^, [[DeLiang Wang|AUTHOR DeLiang Wang]]^^2^^, [[Xueliang Zhang|AUTHOR Xueliang Zhang]]^^1^^, [[Guanglai Gao|AUTHOR Guanglai Gao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inner Mongolia University, China; ^^2^^Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4626–4630&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates deep learning based signal-to-noise ratio (SNR) estimation at the frame level. We propose to employ recurrent neural networks (RNNs) with long short-term memory (LSTM) in order to leverage contextual information for this task. As acoustic features are important for deep learning algorithms, we also examine a variety of monaural features and investigate feature combinations using Group Lasso and sequential floating forward selection. By replacing LSTM with bidirectional LSTM, the proposed algorithm naturally leads to a long-term SNR estimator. Systematical evaluations demonstrate that the proposed SNR estimators significantly outperform other frame-level and long-term SNR estimators.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xuan Dong|AUTHOR Xuan Dong]], [[Donald S. Williamson|AUTHOR Donald S. Williamson]]
</p><p class="cpabstractcardaffiliationlist">Indiana University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4631–4635&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The real-world capabilities of objective speech quality measures are limited since current measures (1) are developed from simulated data that does not adequately model real environments; or they (2) predict objective scores that are not always strongly correlated with subjective ratings. Additionally, a large dataset of real-world signals with listener quality ratings does not currently exist, which would help facilitate real-world assessment. In this paper, we collect and predict the perceptual quality of real-world speech signals that are evaluated by human listeners. We first collect a large quality rating dataset by conducting crowdsourced listening studies on two real-world corpora. We further develop a novel approach that predicts human quality ratings using a pyramid bidirectional long short term memory (pBLSTM) network with an attention mechanism. The results show that the proposed model achieves statistically lower estimation errors than prior assessment approaches, where the predicted scores strongly correlate with human judgments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Avamarie Brueggeman|AUTHOR Avamarie Brueggeman]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4636–4640&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although speech recognition technology for cochlear implants has continued to improve, music accessibility remains a challenge. Previous studies have shown that cochlear implant users may prefer listening to music that has been reengineered to be less complex. In this paper, we consider the combined effect of spectral complexity reduction and number of instruments playing on musical enjoyment with cochlear implants. Nine normal hearing listeners rated 200 10-second music samples on three enjoyment modalities (musicality, pleasantness, and naturalness) with and without the use of cochlear implant simulation. The music samples included 20 versions of the song “Twinkle Twinkle Little Star” synthesized using one of five different instruments and with one to four instruments playing at once. The remaining 180 versions were created by reducing each sample’s spectral complexity to nine different levels using principal component analysis. The results showed a preference for less amounts of spectral complexity reduction for samples without cochlear implant simulation (P<.001), as well as a preference for fewer instruments for samples with cochlear implant simulation (P<.001). However, spectral complexity reduction was not a significant factor for samples with cochlear implant simulation, and a significant interaction effect between spectral complexity reduction and number of instruments was not found.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matt O’Connor|AUTHOR Matt O’Connor]], [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]
</p><p class="cpabstractcardaffiliationlist">Victoria University of Wellington, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4646–4650&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech privacy in modern sensor network environments is necessary for widespread adoption and public trust of collaborative acoustic signal processing. Most current distributed privacy research deals with ensuring local node observations are not accessible by neighbouring nodes while still solving shared tasks. In this work we develop the concept of distributed task privacy in unbounded public networks, where linear codes are used to create limits on the number of nodes contributing to a distributed summation task, such as beamforming. We accomplish this by wrapping local observations in a linear code and intentionally applying symbol errors prior to transmission. If many nodes join a distributed speech enhancement task, a proportional number of symbol errors are introduced into the aggregated code leading to decoding failure if the code’s predefined symbol error limit is exceeded.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Leschanowsky|AUTHOR Anna Leschanowsky]], [[Sneha Das|AUTHOR Sneha Das]], [[Tom Bäckström|AUTHOR Tom Bäckström]], [[Pablo Pérez Zarazaga|AUTHOR Pablo Pérez Zarazaga]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4651–4655&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice based devices and virtual assistants are widely integrated into our daily life, but the growing popularity has also raised concerns about data privacy in processing and storage. While improvements in technology and data protection regulations have been made to provide users a more secure experience, the concept of privacy continues to be subject to enormous challenges. We can observe that people intuitively adjust their way of talking in a human-to-human conversation, an intuition that devices could benefit from to increase their level of privacy. In order to enable devices to quantify privacy in an acoustic scenario, this paper focuses on how people perceive privacy with respect to environmental noise. We measured privacy scores on a crowdsourcing platform with a paired comparison listening test and obtained reliable and consistent results. Our measurements show that the experience of privacy varies depending on the acoustic features of the ambient noise. Furthermore, multiple probabilistic choice models were fitted to the data to obtain a meaningful ordering of noise scenarios conveying listeners’ preferences. A preference tree model was found to fit best, indicating that subjects change their decision strategy depending on the scenarios under test.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Kreuk|AUTHOR Felix Kreuk]]^^1^^, [[Yossi Adi|AUTHOR Yossi Adi]]^^2^^, [[Bhiksha Raj|AUTHOR Bhiksha Raj]]^^3^^, [[Rita Singh|AUTHOR Rita Singh]]^^3^^, [[Joseph Keshet|AUTHOR Joseph Keshet]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Bar-Ilan University, Israel; ^^2^^Facebook, Israel; ^^3^^Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4656–4660&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Steganography is the science of hiding a secret message within an ordinary public message, which is referred to as Carrier. Traditionally, digital signal processing techniques, such as least significant bit encoding, were used for hiding messages. In this paper, we explore the use of deep neural networks as steganographic functions for speech data. We showed that steganography models proposed for vision are less suitable for speech, and propose a new model that includes the short-time Fourier transform and inverse-short-time Fourier transform as differentiable layers within the network, thus imposing a vital constraint on the network outputs. We empirically demonstrated the effectiveness of the proposed method comparing to deep learning based on several speech datasets and analyzed the results quantitatively and qualitatively. Moreover, we showed that the proposed approach could be applied to conceal multiple messages in a single carrier using multiple decoders or a single conditional decoder. Lastly, we evaluated our model under different channel distortions. Qualitative experiments suggest that modifications to the carrier are unnoticeable by human listeners and that the decoded messages are highly intelligible.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sina Däubener|AUTHOR Sina Däubener]], [[Lea Schönherr|AUTHOR Lea Schönherr]], [[Asja Fischer|AUTHOR Asja Fischer]], [[Dorothea Kolossa|AUTHOR Dorothea Kolossa]]
</p><p class="cpabstractcardaffiliationlist">Ruhr-Universität Bochum, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4661–4665&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine learning systems and also, specifically, automatic speech recognition (ASR) systems are vulnerable against adversarial attacks, where an attacker maliciously changes the input. In the case of ASR systems, the most interesting cases are //targeted// attacks, in which an attacker aims to force the system into recognizing given target transcriptions in an arbitrary audio sample. The increasing number of sophisticated, quasi imperceptible attacks raises the question of countermeasures.

In this paper, we focus on hybrid ASR systems and compare four acoustic models regarding their ability to indicate uncertainty under attack: a feed-forward neural network and three neural networks specifically designed for uncertainty quantification, namely a Bayesian neural network, Monte Carlo dropout, and a deep ensemble.

We employ uncertainty measures of the acoustic model to construct a simple one-class classification model for assessing whether inputs are benign or adversarial. Based on this approach, we are able to detect adversarial examples with an area under the receiver operating characteristic curve of more than 0.99. The neural networks for uncertainty quantification simultaneously diminish the vulnerability to the attack, which is reflected in a lower recognition accuracy of the malicious target text in comparison to a standard hybrid ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Ifeoluwa Adelani|AUTHOR David Ifeoluwa Adelani]], [[Ali Davody|AUTHOR Ali Davody]], [[Thomas Kleinbauer|AUTHOR Thomas Kleinbauer]], [[Dietrich Klakow|AUTHOR Dietrich Klakow]]
</p><p class="cpabstractcardaffiliationlist">Universität des Saarlandes, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4666–4670&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine Learning approaches to Natural Language Processing tasks benefit from a comprehensive collection of real-life user data. At the same time, there is a clear need for protecting the privacy of the users whose data is collected and processed. For text collections, such as, e.g., transcripts of voice interactions or patient records, replacing sensitive parts with benign alternatives can provide de-identification. However, how much privacy is actually guaranteed by such text transformations, and are the resulting texts still useful for machine learning?

In this paper, we derive formal privacy guarantees for general text transformation-based de-identification methods on the basis of //Differential Privacy//.

We also measure the effect that different ways of masking private information in dialog transcripts have on a subsequent machine learning task. To this end, we formulate different masking strategies and compare their privacy-utility trade-offs. In particular, we compare a simple //redact// approach with more sophisticated //word-by-word// replacement using deep learning models on multiple natural language understanding tasks like named entity recognition, intent detection, and dialog act classification. We find that only word-by-word replacement is robust against performance drops in various tasks.</p></div>
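The contrast between the //redact// strategy and //word-by-word// replacement can be made concrete with a small sketch; the entity spans and the surrogate generator are assumed to come from upstream components (e.g. an NER model), which are not shown.

```python
def redact(text, entity_spans):
    """Replace each sensitive span with a fixed tag (the simple 'redact'
    strategy); entity_spans is a list of surface strings to mask."""
    for span in entity_spans:
        text = text.replace(span, "[REDACTED]")
    return text

def replace_word_by_word(text, entity_spans, surrogate):
    """Replace each sensitive span with a same-type surrogate value produced
    by a caller-supplied function (e.g. a substitute name for a name)."""
    for span in entity_spans:
        text = text.replace(span, surrogate(span))
    return text

print(redact("Call Alice at 555-0100", ["Alice", "555-0100"]))
print(replace_word_by_word("Call Alice at 555-0100",
                           ["Alice", "555-0100"],
                           lambda s: {"Alice": "Maria",
                                      "555-0100": "555-0199"}[s]))
```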
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tejas Jayashankar|AUTHOR Tejas Jayashankar]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]], [[Pierre Moulin|AUTHOR Pierre Moulin]]
</p><p class="cpabstractcardaffiliationlist">MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4671–4675&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Various adversarial audio attacks have recently been developed to fool automatic speech recognition (ASR) systems. We here propose a defense against such attacks based on the uncertainty introduced by dropout in neural networks. We show that our defense is able to detect attacks created through optimized perturbations and frequency masking on a state-of-the-art end-to-end ASR system. Furthermore, the defense can be made robust against attacks that are immune to noise reduction. We test our defense on Mozilla’s CommonVoice dataset, the UrbanSound dataset, and an excerpt of the LibriSpeech dataset, showing that it achieves high detection accuracy in a wide range of scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wen-Chin Huang|AUTHOR Wen-Chin Huang]]^^1^^, [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]]^^1^^, [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]]^^1^^, [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya University, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4676–4680&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce a novel sequence-to-sequence (seq2seq) voice conversion (VC) model based on the Transformer architecture with text-to-speech (TTS) pretraining. Seq2seq VC models are attractive owing to their ability to convert prosody. While seq2seq models based on recurrent neural networks (RNNs) and convolutional neural networks (CNNs) have been successfully applied to VC, the use of the Transformer network, which has shown promising results in various speech processing tasks, has not yet been investigated. Nonetheless, their data-hungry property and the mispronunciation of converted speech make seq2seq models far from practical. To this end, we propose a simple yet effective pretraining technique to transfer knowledge from learned TTS models, which benefit from large-scale, easily accessible TTS corpora. VC models initialized with such pretrained model parameters are able to generate effective hidden representations for high-fidelity, highly intelligible converted speech. Experimental results show that such a pretraining scheme can facilitate data-efficient training and outperform an RNN-based seq2seq VC model in terms of intelligibility, naturalness, and similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Songxiang Liu|AUTHOR Songxiang Liu]]^^1^^, [[Yuewen Cao|AUTHOR Yuewen Cao]]^^1^^, [[Shiyin Kang|AUTHOR Shiyin Kang]]^^2^^, [[Na Hu|AUTHOR Na Hu]]^^2^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^3^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^Tencent, China; ^^3^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4721–4725&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice conversion (VC) techniques aim to modify speaker identity of an utterance while preserving the underlying linguistic information. Most VC approaches ignore modeling of the speaking style (e.g. emotion and emphasis), which may contain the factors intentionally added by the speaker and should be retained during conversion. This study proposes a sequence-to-sequence based non-parallel VC approach, which has the capability of transferring the speaking style from the source speech to the converted speech by explicitly modeling. Objective evaluation and subjective listening tests show superiority of the proposed VC approach in terms of speech naturalness and speaker similarity of the converted speech. Experiments are also conducted to show the source-style transferability of the proposed approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ehab A. AlBadawy|AUTHOR Ehab A. AlBadawy]], [[Siwei Lyu|AUTHOR Siwei Lyu]]
</p><p class="cpabstractcardaffiliationlist">SUNY Albany, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4726–4730&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An impressionist is the one who tries to mimic other people’s voices and their style of speech. Humans have mastered such a task throughout the years. In this work, we introduce a deep learning-based approach to do voice conversion with speech style transfer across different speakers. In our work, we use a combination of Variational Auto-Encoder (VAE) and Generative Adversarial Network (GAN) as the main components of our proposed model followed by a WaveNet-based vocoder. We use three objective metrics to evaluate our model using the ASVspoof 2019 for measuring the difficulty of differentiating between human and synthesized samples, content verification for transcription accuracy, and speaker encoding for identity verification. Our results show the efficacy of our proposed model in producing a high quality synthesized speech on Flickr8k audio corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hitoshi Suda|AUTHOR Hitoshi Suda]], [[Gaku Kotani|AUTHOR Gaku Kotani]], [[Daisuke Saito|AUTHOR Daisuke Saito]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4681–4685&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a new voice conversion (VC) framework, which can be trained with nonparallel corpora, using non-negative matrix factorization (NMF). While nonparallel VC frameworks have already been studied widely, the conventional frameworks require huge background knowledge or plenty of training utterances. This is because of difficulty in disentanglement of linguistic and speaker information without a large amount of data. This work tackles the problem by utilizing NMF, which can factorize acoustic features into time-variant and time-invariant components in an unsupervised manner. To preserve linguistic consistency between source and target speakers, the proposed method performs soft alignment between the acoustic features of the source speaker and the exemplars of the target speaker. The method adopts the alignment technique of INCA algorithm, which is an iterative method to obtain alignment of nonparallel corpora. The results of subjective experiments showed that the proposed framework outperformed not only the NMF-based parallel VC framework but also the CycleGAN-based nonparallel VC framework. The results also showed that the proposed method achieved high-quality conversion even if the number of training utterances for the source speaker was extremely limited.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chen-Yu Chen|AUTHOR Chen-Yu Chen]]^^1^^, [[Wei-Zhong Zheng|AUTHOR Wei-Zhong Zheng]]^^1^^, [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]]^^2^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^, [[Pei-Chun Li|AUTHOR Pei-Chun Li]]^^3^^, [[Ying-Hui Lai|AUTHOR Ying-Hui Lai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Yang-Ming University; ^^2^^Academia Sinica; ^^3^^Mackay Medical College</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4686–4690&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The voice conversion (VC) system is a well-known approach to improve the communication efficiency of patients with dysarthria. In this study, we used a gated convolutional neural network (Gated CNN) with the phonetic posteriorgrams (PPGs) features to perform VC for patients with dysarthria, with WaveRNN vocoder used to synthesis converted speech. In addition, two well-known deep learning-based models, convolution neural network (CNN) and bidirectional long short-term memory (BLSTM) were used to compare with the Gated CNN in the proposed VC system. The results from the evaluation of speech intelligibility metric of Google ASR and listening test showed that the proposed system performed better than the original dysarthric speech. Meanwhile, the Gated CNN model performs better than the other models and requires fewer parameters compared to BLSTM. The results suggested that Gated CNN can be used as a communication assistive system to overcome the degradation of speech intelligibility caused by dysarthria.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Da-Yi Wu|AUTHOR Da-Yi Wu]], [[Yen-Hao Chen|AUTHOR Yen-Hao Chen]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4691–4695&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice conversion (VC) is a task that transforms the source speaker’s timbre, accent, and tones in audio into another one’s while preserving the linguistic content. It is still a challenging work, especially in a one-shot setting. Auto-encoder-based VC methods disentangle the speaker and the content in input speech without explicit information about the speaker’s identity, so these methods can further generalize to unseen speakers. The disentangle capability is achieved by vector quantization (VQ), adversarial training, or instance normalization (IN). However, the imperfect disentanglement may harm the quality of output speech. In this work, to further improve audio quality, we use the U-Net architecture within an auto-encoder-based VC system. We find that to leverage the U-Net architecture, a strong information bottleneck is necessary. The VQ-based method, which quantizes the latent vectors, can serve the purpose. The objective and the subjective evaluations show that the proposed method performs well in both audio naturalness and speaker similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seung-won Park|AUTHOR Seung-won Park]]^^1^^, [[Doo-young Kim|AUTHOR Doo-young Kim]]^^1^^, [[Myun-chul Joe|AUTHOR Myun-chul Joe]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Seoul National University, Korea; ^^2^^MINDs Lab, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4696–4700&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose //Cotatron//, a transcription-guided speech encoder for speaker-independent linguistic representation. Cotatron is based on the multispeaker TTS architecture and can be trained with conventional TTS datasets. We train a voice conversion system to reconstruct speech with Cotatron features, which is similar to the previous methods based on Phonetic Posteriorgram (PPG). By training and evaluating our system with 108 speakers from the VCTK dataset, we outperform the previous method in terms of both naturalness and speaker similarity. Our system can also convert speech from speakers that are unseen during training, and utilize ASR to automate the transcription with minimal reduction of the performance. Audio samples are available at https://mindslab-ai.github.io/cotatron, and the code with a pre-trained model will be made available soon.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Tao Wang|AUTHOR Tao Wang]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4701–4705&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speech synthesis can reach high quality and naturalness with low-resource adaptation data. However, the generalization of out-domain texts and the improving modeling accuracy of speaker representations are still challenging tasks. The limited adaptation data leads to unacceptable errors and low similarity of the synthetic speech. In this paper, both speaker representations modeling and acoustic model structure are improved for the speaker adaptation task. On the one hand, compared with the conventional methods that focused on using fixed global speaker representations, the attention gating is proposed to adjust speaker representations dynamically based on the attended context and prosody information, which can describe more pronunciation characteristics in phoneme level. On the other hand, to improve the robustness and avoid over-fitting, the decoder model is factored into average-net and adaptation-net, which are designed for learning speaker independent acoustic features and target speaker timbre imitation respectively. And the context discriminator is pre-trained by large ASR data to supervise the average-net generating proper speaker independent acoustic features for different phoneme. Experimental results on Mandarin dataset show that proposed methods lead to an improvement on intelligibility, naturalness and similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Lian|AUTHOR Zheng Lian]]^^1^^, [[Zhengqi Wen|AUTHOR Zhengqi Wen]]^^1^^, [[Xinyong Zhou|AUTHOR Xinyong Zhou]]^^2^^, [[Songbai Pu|AUTHOR Songbai Pu]]^^3^^, [[Shengkai Zhang|AUTHOR Shengkai Zhang]]^^3^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Northwestern Polytechnical University, China; ^^3^^Momo, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4706–4710&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice conversion (VC) is to convert the source speaker’s voice to sound like that of the target speaker without changing the linguistic content. Recent work shows that phonetic posteriorgrams (PPGs) based VC frameworks have achieved promising results in speaker similarity and speech quality. However, in practice, we find that the trajectory of some generated waveforms is not smooth, thus causing some voice error problems and degrading the sound quality of the converted speech. In this paper, we propose to advance the existing PPGs based voice conversion methods to achieve better performance. Specifically, we propose a new auto-regressive model for any-to-one VC, called Auto-Regressive Voice Conversion (ARVC). Compared with conventional PPGs based VC, ARVC takes previous step acoustic features as the inputs to produce the next step outputs via the auto-regressive structure. Experimental results on the CMU-ARCTIC dataset show that our method can improve the speech quality and speaker similarity of the converted speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shahan Nercessian|AUTHOR Shahan Nercessian]]
</p><p class="cpabstractcardaffiliationlist">iZotope, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4711–4715&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a zero-shot voice conversion algorithm adding a number of conditioning signals to explicitly transfer prosody, linguistic content, and dynamics to conversion results. We show that the proposed approach improves overall conversion quality and generalization to out-of-domain samples relative to a baseline implementation of AutoVC, as the inclusion of conditioning signals can help reduce the burden on the model’s encoder to implicitly learn all of the different aspects involved in speech production. An ablation analysis illustrates the effectiveness of the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Minchuan Chen|AUTHOR Minchuan Chen]], [[Weijian Hou|AUTHOR Weijian Hou]], [[Jun Ma|AUTHOR Jun Ma]], [[Shaojun Wang|AUTHOR Shaojun Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4716–4720&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies have shown remarkable success in voice conversion (VC) based on generative adversarial networks (GANs) without parallel data. In this paper, based on the conditional generative adversarial networks (CGANs), we propose a self- and semi-supervised method combined with mixup and data augmentation that allows non-parallel many-to-many voice conversion with fewer labeled data. In this method, the discriminator of CGANs learns to not only distinguish real/fake samples, but also classify attribute domains. We augment the discriminator with an auxiliary task to improve representation learning and introduce a training task to predict labels for the unlabeled samples. The proposed approach reduces the appetite for labeled data in voice conversion, which enables single generative network to implement many-to-many mapping between different voice domains. Experiment results show that the proposed method is able to achieve comparable voice quality and speaker similarity with only 10% of the labeled data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Changhan Wang|AUTHOR Changhan Wang]], [[Juan Pino|AUTHOR Juan Pino]], [[Jiatao Gu|AUTHOR Jiatao Gu]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4731–4735&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transfer learning from high-resource languages is known to be an efficient way to improve end-to-end automatic speech recognition (ASR) for low-resource languages. Pre-trained or jointly trained encoder-decoder models, however, do not share the language modeling (decoder) for the same language, which is likely to be inefficient for distant target languages. We introduce speech-to-text translation (ST) as an auxiliary task to incorporate additional knowledge of the target language and enable transferring from that target language. Specifically, we first translate high-resource ASR transcripts into a target low-resource language, with which a ST model is trained. Both ST and target ASR share the same attention-based encoder-decoder architecture and vocabulary. The former task then provides a fully pre-trained model for the latter, bringing up to 24.6% word error rate (WER) reduction to the baseline (direct transfer from high-resource ASR). We show that training ST with human translations is not necessary. ST trained with machine translation (MT) pseudo-labels brings consistent gains. It can even outperform those using human labels when transferred to target ASR by leveraging only 500K MT examples. Even with pseudo-labels from low-resource MT (200K examples), ST-enhanced transfer brings up to 8.9% WER reduction to direct transfer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zimeng Qiu|AUTHOR Zimeng Qiu]]^^1^^, [[Yiyuan Li|AUTHOR Yiyuan Li]]^^2^^, [[Xinjian Li|AUTHOR Xinjian Li]]^^2^^, [[Florian Metze|AUTHOR Florian Metze]]^^2^^, [[William M. Campbell|AUTHOR William M. Campbell]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4776–4780&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching (CS) speech recognition is drawing increasing attention in recent years as it is a common situation in speech where speakers alternate between languages in the context of a single utterance or discourse. In this work, we propose Hierarchical Attention-based Recurrent Decoder (HARD) to build a context-aware end-to-end code-switching speech recognition system. HARD is an attention-based decoder model which employs a hierarchical recurrent network to enhance model’s awareness of previous generated historical sequence (sub-sequence) at decoding. This architecture has two LSTMs to model encoder hidden states at both the character level and sub-sequence level, therefore enables us to generate utterances that switch between languages more precisely from speech. We also employ language identification (LID) as an auxiliary task in multi-task learning (MTL) to boost speech recognition performance. We evaluate the effectiveness of our model on the SEAME dataset, results show that our multi-task learning HARD (MTL-HARD) model improves over the baseline Listen, Attend and Spell (LAS) model by reducing character error rate (CER) from 29.91% to 26.56% and mixed error rate (MER) from 38.99% to 34.50%, and case study shows MTL-HARD can carry historical information in the sub-sequences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samuel Thomas|AUTHOR Samuel Thomas]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4736–4740&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual acoustic models are often used to build automatic speech recognition (ASR) systems for low-resource languages. We propose a novel data augmentation technique to improve the performance of an end-to-end (E2E) multilingual acoustic model by transliterating data into the various languages that are part of the multilingual training set. Along with two metrics for data selection, this technique can also improve recognition performance of the model on unsupervised and cross-lingual data. On a set of four low-resource languages, we show that word error rates (WER) can be reduced by up to 12% and 5% relative compared to monolingual and multilingual baselines respectively. We also demonstrate how a multilingual network constructed within this framework can be extended to a new training language. With the proposed methods, the new model has WER reductions of up to 24% and 13% respectively compared to monolingual and multilingual baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yun Zhu|AUTHOR Yun Zhu]], [[Parisa Haghani|AUTHOR Parisa Haghani]], [[Anshuman Tripathi|AUTHOR Anshuman Tripathi]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Brian Farris|AUTHOR Brian Farris]], [[Hainan Xu|AUTHOR Hainan Xu]], [[Han Lu|AUTHOR Han Lu]], [[Hasim Sak|AUTHOR Hasim Sak]], [[Isabel Leal|AUTHOR Isabel Leal]], [[Neeraj Gaur|AUTHOR Neeraj Gaur]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]], [[Qian Zhang|AUTHOR Qian Zhang]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4741–4745&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual automatic speech recognition systems can transcribe utterances from different languages. These systems are attractive from different perspectives: they can provide quality improvements, specially for lower resource languages, and simplify the training and deployment procedure. End-to-end speech recognition has further simplified multilingual modeling as one model, instead of several components of a classical system, have to be unified. In this paper, we investigate a streamable end-to-end multilingual system based on the Transformer Transducer [1]. We propose several techniques for adapting the self-attention architecture based on the language id. We analyze the trade-offs of each method with regards to quality gains and number of additional parameters introduced. We conduct experiments in a real-world task consisting of five languages. Our experimental results demonstrate ~8% to ~20% relative gain over the baseline multilingual model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Srikanth Madikeri|AUTHOR Srikanth Madikeri]]^^1^^, [[Banriskhem K. Khonglah|AUTHOR Banriskhem K. Khonglah]]^^1^^, [[Sibo Tong|AUTHOR Sibo Tong]]^^1^^, [[Petr Motlicek|AUTHOR Petr Motlicek]]^^1^^, [[Hervé Bourlard|AUTHOR Hervé Bourlard]]^^1^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Idiap Research Institute, Switzerland; ^^2^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4746–4750&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual acoustic model training combines data from multiple languages to train an automatic speech recognition system. Such a system is beneficial when training data for a target language is limited. Lattice-Free Maximum Mutual Information (LF-MMI) training performs sequence discrimination by introducing competing hypotheses through a denominator graph in the cost function. The standard approach to train a multilingual model with LF-MMI is to combine the acoustic units from all languages and use a common denominator graph. The resulting model is either used as a feature extractor to train an acoustic model for the target language or directly fine-tuned. In this work, we propose a scalable approach to train the multilingual acoustic model using a typical multitask network for the LF-MMI framework. A set of language-dependent denominator graphs is used to compute the cost function. The proposed approach is evaluated under typical multilingual ASR tasks using GlobalPhone and BABEL datasets. Relative improvements up to 13.2% in WER are obtained when compared to the corresponding monolingual LF-MMI baselines. The implementation is made available as a part of the Kaldi speech recognition toolkit.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Anuroop Sriram|AUTHOR Anuroop Sriram]], [[Paden Tomasello|AUTHOR Paden Tomasello]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4751–4755&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We study training a single acoustic model for multiple languages with the aim of improving automatic speech recognition (ASR) performance on low-resource languages, and overall simplifying deployment of ASR systems that support diverse languages. We perform an extensive benchmark on 51 languages, with varying amount of training data by language (from 100 hours to 1100 hours). We compare three variants of multilingual training from a single joint model without knowing the input language, to using this information, to multiple heads (one per language “cluster”). We show that multilingual training of ASR models on several languages can improve recognition performance, in particular, on low resource languages. We see 20.9%, 23% and 28.8% average WER relative reduction compared to monolingual baselines on joint model, joint model with language input and multi head model respectively. To our knowledge, this is the first work studying multilingual ASR at massive scale, with more than 50 languages and more than 16,000 hours of audio across them.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hardik B. Sailor|AUTHOR Hardik B. Sailor]]^^1^^, [[Thomas Hain|AUTHOR Thomas Hain]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, India; ^^2^^University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4756–4760&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a multilingual acoustic modeling approach for Indian languages using a Multitask Learning (MTL) framework. Language-specific phoneme recognition is explored as an auxiliary task in MTL framework along with the primary task of multilingual senone classification. This auxiliary task regularizes the primary task with both the context-independent phonemes and language identities induced by language-specific phoneme. The MTL network is also extended by structuring the primary and auxiliary task outputs in the form of a Structured Output Layer (SOL) such that both depend on each other. The experiments are performed using a database of the three Indian languages Gujarati, Tamil, and Telugu. The experimental results show that the proposed MTL-SOL framework performed well compared to baseline monolingual systems with a relative reduction of 3.1–4.4 and 2.9–4.1% in word error rate for the development and evaluation sets, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Khyathi Raghavi Chandu|AUTHOR Khyathi Raghavi Chandu]], [[Alan W. Black|AUTHOR Alan W. Black]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4761–4765&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-Switching (CS) is a prevalent phenomenon observed in bilingual and multilingual communities, especially in digital and social media platforms. A major problem in this domain is the dearth of substantial corpora to train large scale neural models. Generating vast amounts of quality synthetic text assists several downstream tasks that heavily rely on language modeling such as speech recognition, text-to-speech synthesis etc,. We present a novel vantage point of CS to be style variations between both the participating languages. Our approach does not need any external dense annotations such as lexical language ids. It relies on easily obtainable monolingual corpora without any parallel alignment and a limited set of naturally CS sentences. We propose a two-stage generative adversarial training approach where the first stage generates competitive negative examples for CS and the second stage generates more realistic CS sentences. We present our experiments on the following pairs of languages: Spanish-English, Mandarin-English, Hindi-English and Arabic-French. We show that the trends in metrics for generated CS move closer to real CS data in the above language pairs through the dual stage training process. We believe this viewpoint of CS as style variations opens new perspectives for modeling various tasks in CS text.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yizhou Lu|AUTHOR Yizhou Lu]], [[Mingkun Huang|AUTHOR Mingkun Huang]], [[Hao Li|AUTHOR Hao Li]], [[Jiaqi Guo|AUTHOR Jiaqi Guo]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4766–4770&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching speech recognition is a challenging task which has been studied in many previous work, and one main challenge for this task is the lack of code-switching data. In this paper, we study end-to-end models for Mandarin-English code-switching automatic speech recognition. External monolingual data are utilized to alleviate the data sparsity problem. More importantly, we propose a bi-encoder transformer network based Mixture of Experts (MoE) architecture to better leverage these data. We decouple Mandarin and English modeling with two separate encoders to better capture language-specific information, and a gating network is employed to explicitly handle the language identification task. For the gating network, different models and training modes are explored to learn the better MoE interpolation coefficients. Experimental results show that compared with the baseline transformer model, the proposed new MoE architecture can obtain up to 10.4% relative error reduction on the code-switching test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yash Sharma|AUTHOR Yash Sharma]]^^1^^, [[Basil Abraham|AUTHOR Basil Abraham]]^^2^^, [[Karan Taneja|AUTHOR Karan Taneja]]^^1^^, [[Preethi Jyothi|AUTHOR Preethi Jyothi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIT Bombay, India; ^^2^^Microsoft, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4771–4775&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Building Automatic Speech Recognition (ASR) systems for code-switched speech has recently gained renewed attention due to the widespread use of speech technologies in multilingual communities worldwide. End-to-end ASR systems are a natural modeling choice due to their ease of use and superior performance in monolingual settings. However, it is well-known that end-to-end systems require large amounts of labeled speech. In this work, we investigate improving code-switched ASR in low resource settings via data augmentation using code-switched text-to-speech (TTS) synthesis. We propose two targeted techniques to effectively leverage TTS speech samples: 1) Mixup, an existing technique to create new training samples via linear interpolation of existing samples, applied to TTS and real speech samples, and 2) a new loss function, used in conjunction with TTS samples, to encourage code-switched predictions. We report significant improvements in ASR performance achieving absolute word error rate (WER) reductions of up to 5%, and measurable improvement in code switching using our proposed techniques on a Hindi-English code-switched ASR task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tuan Dinh|AUTHOR Tuan Dinh]]^^1^^, [[Alexander Kain|AUTHOR Alexander Kain]]^^1^^, [[Robin Samlan|AUTHOR Robin Samlan]]^^2^^, [[Beiming Cao|AUTHOR Beiming Cao]]^^3^^, [[Jun Wang|AUTHOR Jun Wang]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Oregon Health & Science University, USA; ^^2^^University of Arizona, USA; ^^3^^University of Texas at Austin, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4781–4785&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Individuals who undergo a laryngectomy lose their ability to phonate. Yet current treatment options allow alaryngeal speech, they struggle in their daily communication and social life due to the low intelligibility of their speech. In this paper, we presented two conversion methods for increasing intelligibility and naturalness of speech produced by laryngectomees (LAR). The first method used a deep neural network for predicting binary voicing/unvoicing or the degree of aperiodicity. The second method used a conditional generative adversarial network to learn the mapping from LAR speech spectra to clearly-articulated speech spectra. We also created a synthetic fundamental frequency trajectory with an intonation model consisting of phrase and accent curves. For the two conversion methods, we showed that adaptation always increased the performance of pre-trained models, objectively. In subjective testing involving four LAR speakers, we significantly improved the naturalness of two speakers, and we also significantly improved the intelligibility of one speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bence Mark Halpern|AUTHOR Bence Mark Halpern]]^^1^^, [[Rob van Son|AUTHOR Rob van Son]]^^1^^, [[Michiel van den Brekel|AUTHOR Michiel van den Brekel]]^^1^^, [[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universiteit van Amsterdam, The Netherlands; ^^2^^Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4826–4830&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Oral cancer speech is a disease which impacts more than half a million people worldwide every year. Analysis of oral cancer speech has so far focused on read speech. In this paper, we 1) present and 2) analyse a three-hour long spontaneous oral cancer speech dataset collected from YouTube. 3) We set baselines for an oral cancer speech detection task on this dataset. The analysis of these explainable machine learning baselines shows that sibilants and stop consonants are the most important indicators for spontaneous oral cancer speech detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Han Tong|AUTHOR Han Tong]]^^1^^, [[Hamid Sharifzadeh|AUTHOR Hamid Sharifzadeh]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Unitec, New Zealand; ^^2^^SIT, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4786–4790&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Dysarthria is a speech disorder that can significantly impact a person’s daily life, and yet may be amenable to therapy. To automatically detect and classify dysarthria, researchers have proposed various computational approaches ranging from traditional speech processing methods focusing on speech rate, intelligibility, intonation, etc. to more advanced machine learning techniques. Recently developed machine learning systems rely on audio features for classification; however, research in other fields has shown that audio-video cross-modal frameworks can improve classification accuracy while simultaneously reducing the amount of training data required compared to uni-modal systems (i.e. audio- or video-only).

In this paper, we propose an audio-video cross-modal deep learning framework that takes both audio and video data as input to classify dysarthria severity levels. Our novel cross-modal framework achieves over 99% test accuracy on the UASPEECH dataset — significantly outperforming current uni-modal systems that utilise audio data alone. More importantly, it is able to accelerate training time while improving accuracy, and to do so with reduced training data requirements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuqin Lin|AUTHOR Yuqin Lin]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Sheng Li|AUTHOR Sheng Li]]^^2^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Chenchen Ding|AUTHOR Chenchen Ding]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4791–4795&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study proposes a staged knowledge distillation method to build End-to-End (E2E) automatic speech recognition (ASR) and automatic speech attribute transcription (ASAT) systems for patients with dysarthria caused by either cerebral palsy (CP) or amyotrophic lateral sclerosis (ALS). Compared with traditional methods, our proposed method can use limited dysarthric speech more effectively. And the dysarthric E2E-ASR and ASAT systems enhanced by the proposed method can achieve 38.28% relative phone error rate (PER%) reduction and 48.33% relative attribute detection error rate (DER%) reduction over their baselines respectively on the TORGO dataset. The experiments show that our system offers potential as a rehabilitation tool and medical diagnostic aid.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuki Takashima|AUTHOR Yuki Takashima]]^^1^^, [[Ryoichi Takashima|AUTHOR Ryoichi Takashima]]^^2^^, [[Tetsuya Takiguchi|AUTHOR Tetsuya Takiguchi]]^^2^^, [[Yasuo Ariki|AUTHOR Yasuo Ariki]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Hitachi, Japan; ^^2^^Kobe University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4796–4800&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present in this paper an automatic speech recognition (ASR) system for a person with an articulation disorder resulting from athetoid cerebral palsy. Because their utterances are often unstable or unclear, speech recognition systems have difficulty recognizing the speech of those with this disorder. For example, their speech styles often fluctuate greatly even when they are repeating the same sentences. For this reason, their speech tends to have great variation even within recognition classes. To alleviate this intra-class variation problem, we propose an ASR system based on deep metric learning. This system learns an embedded representation that is characterized by a small distance between input utterances of the same class, while the distance of the input utterances of different classes is large. Therefore, our method makes it easy for the ASR system to distinguish dysarthric speech. Experimental results show that our proposed approach using deep metric learning improves the word-recognition accuracy consistently. Moreover, we also evaluate the combination of our proposed method and transfer learning from unimpaired speech to alleviate the low-resource problem associated with impaired speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Divya Degala|AUTHOR Divya Degala]]^^1^^, [[Achuth Rao M.V.|AUTHOR Achuth Rao M.V.]]^^1^^, [[Rahul Krishnamurthy|AUTHOR Rahul Krishnamurthy]]^^2^^, [[Pebbili Gopikishore|AUTHOR Pebbili Gopikishore]]^^3^^, [[Veeramani Priyadharshini|AUTHOR Veeramani Priyadharshini]]^^3^^, [[Prakash T.K.|AUTHOR Prakash T.K.]]^^3^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^MAHE, India; ^^3^^AIISH, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4801–4805&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Laryngeal videostroboscopy is widely used for the analysis of glottal vibration patterns. This analysis plays a crucial role in the diagnosis of voice disorders. It is essential to study these patterns using automatic glottis segmentation methods to avoid subjectiveness in diagnosis. Glottis detection is an essential step before glottis segmentation. This paper considers the problem of automatic glottis segmentation using U-Net based deep convolutional networks. For accurate glottis detection, we train a fully convolutional network with a large amount of glottal and non-glottal images. In glottis segmentation, we consider U-Net with three different weight initialization schemes: 1) Random weight Initialization (RI), 2) Detection Network weight Initialization (DNI) and 3) Detection Network encoder frozen weight Initialization (DNIFr), using two different architectures: 1) U-Net without skip connection (UWSC) 2) U-Net with skip connection (USC). Experiments with 22 subjects’ data reveal that the performance of glottis segmentation network can be increased by initializing its weights using those of the glottis detection network. Among all schemes, when DNI is used, the USC yields an average localization accuracy of 81.3% and a Dice score of 0.73, which are better than those from the baseline approach by 15.87% and 0.07 (absolute), respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yilin Pan|AUTHOR Yilin Pan]]^^1^^, [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]]^^1^^, [[Zehai Tu|AUTHOR Zehai Tu]]^^1^^, [[Ronan O’Malley|AUTHOR Ronan O’Malley]]^^1^^, [[Traci Walker|AUTHOR Traci Walker]]^^2^^, [[Annalena Venneri|AUTHOR Annalena Venneri]]^^1^^, [[Markus Reuber|AUTHOR Markus Reuber]]^^3^^, [[Daniel Blackburn|AUTHOR Daniel Blackburn]]^^1^^, [[Heidi Christensen|AUTHOR Heidi Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Sheffield, UK; ^^2^^University of Sheffield, UK; ^^3^^Royal Hallamshire Hospital, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4806–4810&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech-based automatic approaches for detecting neurodegenerative disorders (ND) and mild cognitive impairment (MCI) have received more attention recently due to being non-invasive and potentially more sensitive than current pen-and-paper tests. The performance of such systems is highly dependent on the choice of features in the classification pipeline. In particular for acoustic features, arriving at a consensus for a best feature set has proven challenging. This paper explores using deep neural network for extracting features directly from the speech signal as a solution to this. Compared with hand-crafted features, more information is present in the raw waveform, but the feature extraction process becomes more complex and less interpretable which is often undesirable in medical domains. Using a SincNet as a first layer allows for some analysis of learned features. We propose and evaluate the Sinc-CLA (with SincNet, Convolutional, Long Short-Term Memory and Attention layers) as a task-driven acoustic feature extractor for classifying MCI, ND and healthy controls (HC). Experiments are carried out on an in-house dataset. Compared with the popular hand-crafted feature sets, the learned task-driven features achieve a superior classification accuracy. The filters of the SincNet is inspected and acoustic differences between HC, MCI and ND are found.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Neeraj Sharma|AUTHOR Neeraj Sharma]], [[Prashant Krishnan|AUTHOR Prashant Krishnan]], [[Rohit Kumar|AUTHOR Rohit Kumar]], [[Shreyas Ramoji|AUTHOR Shreyas Ramoji]], [[Srikanth Raj Chetupalli|AUTHOR Srikanth Raj Chetupalli]], [[Nirmala R.|AUTHOR Nirmala R.]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4811–4815&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The COVID-19 pandemic presents global challenges transcending boundaries of country, race, religion, and economy. The current gold standard method for COVID-19 detection is the reverse transcription polymerase chain reaction (RT-PCR) testing. However, this method is expensive, time-consuming, and violates social distancing. Also, as the pandemic is expected to stay for a while, there is a need for an alternate diagnosis tool which overcomes these limitations, and is deployable at a large scale. The prominent symptoms of COVID-19 include cough and breathing difficulties. We foresee that respiratory sounds, when analyzed using machine learning techniques, can provide useful insights, enabling the design of a diagnostic tool. Towards this, the paper presents an early effort in creating (and analyzing) a database, called Coswara, of respiratory sounds, namely, cough, breath, and voice. The sound samples are collected via worldwide crowdsourcing using a website application. The curated dataset is released as open access. As the pandemic is evolving, the data collection and analysis is a work in progress. We believe that insights from analysis of Coswara can be effective in enabling sound based technology solutions for point-of-care diagnosis of respiratory infection, and in the near future this can help to diagnose COVID-19.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hannah P. Rowe|AUTHOR Hannah P. Rowe]], [[Sarah E. Gutz|AUTHOR Sarah E. Gutz]], [[Marc F. Maffei|AUTHOR Marc F. Maffei]], [[Jordan R. Green|AUTHOR Jordan R. Green]]
</p><p class="cpabstractcardaffiliationlist">MGH Institute of Health Professions, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4816–4820&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The purpose of this study was to determine the articulatory phenotypes of amyotrophic lateral sclerosis (ALS) and Parkinson’s disease (PD) using a novel acoustic-based framework that assesses five key components of motor performance: //Coordination, Consistency, Speed, Precision//, and //Rate//. The use of interpretable, hypothesis-driven features has the potential to inform impairment-based automatic speech recognition (ASR) models and improve classification algorithms for disorders with divergent articulatory profiles. Acoustic features were extracted from audio recordings of 18 healthy controls, 18 participants with ALS, and 18 participants with PD producing syllable sequences. Results revealed significantly different articulatory phenotypes for each disorder group. Upon stratification into Early Stage and Late Stage in disease progression, results from individual receiver operating characteristic (ROC) curves and decision tree analyses showed high diagnostic accuracy for impaired //Coordination// in the Early Stage and impaired //Rate// in the Late Stage. With additional research, articulatory phenotypes characterized using this framework may lead to advancements in ASR for dysarthric speech and diagnostic accuracy at different disease stages for individuals with distinct articulatory deficits.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lubna Alhinti|AUTHOR Lubna Alhinti]], [[Stuart Cunningham|AUTHOR Stuart Cunningham]], [[Heidi Christensen|AUTHOR Heidi Christensen]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4821–4825&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Effective communication relies on the comprehension of both verbal and nonverbal information. People with dysarthria may lose their ability to produce intelligible and audible speech sounds which in time may affect their way of conveying emotions, that are mostly expressed using nonverbal signals. Recent research shows some promise on automatically recognising the verbal part of dysarthric speech. However, this is the first study that investigates the ability to automatically recognise the nonverbal part. A parallel database of dysarthric and typical emotional speech is collected, and approaches to discriminating between emotions using models trained on either dysarthric (speaker dependent, //matched//) or typical (speaker independent, //unmatched//) speech are investigated for four speakers with dysarthria caused by cerebral palsy and Parkinson’s disease. Promising results are achieved in both scenarios using SVM classifiers, opening new doors to improved, more expressive voice input communication aids.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ewan Dunbar|AUTHOR Ewan Dunbar]]^^1^^, [[Julien Karadayi|AUTHOR Julien Karadayi]]^^2^^, [[Mathieu Bernard|AUTHOR Mathieu Bernard]]^^2^^, [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]]^^2^^, [[Robin Algayres|AUTHOR Robin Algayres]]^^2^^, [[Lucas Ondel|AUTHOR Lucas Ondel]]^^3^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^4^^, [[Sakriani Sakti|AUTHOR Sakriani Sakti]]^^5^^, [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LLF (UMR 7110), France; ^^2^^LSCP (UMR 8554), France; ^^3^^Brno University of Technology, Czech Republic; ^^4^^LIG (UMR 5217), France; ^^5^^NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4831–4835&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present the Zero Resource Speech Challenge 2020, which aims at learning speech representations from raw audio signals without any labels. It combines the data sets and metrics from two previous benchmarks (2017 and 2019) and features two tasks which tap into two levels of speech representation. The first task is to discover low bit-rate subword representations that optimize the quality of speech synthesis; the second one is to discover word-like units from unsegmented raw speech. We present the results of the twenty submitted models and discuss the implications of the main findings for unsupervised speech learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saurabhchand Bhati|AUTHOR Saurabhchand Bhati]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4876–4880&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised spoken term discovery consists of two tasks: finding the acoustic segment boundaries and labeling acoustically similar segments with the same labels. We perform segmentation based on the assumption that the frame feature vectors are more similar within a segment than across the segments. Therefore, for strong segmentation performance, it is crucial that the features represent the phonetic properties of a frame more than other factors of variability. We achieve this via a self-expressing autoencoder framework. It consists of a single encoder and two decoders with shared weights. The encoder projects the input features into a latent representation. One of the decoders tries to reconstruct the input from these latent representations and the other from the self-expressed version of them. We use the obtained features to segment and cluster the speech data. We evaluate the performance of the proposed method in the Zero Resource 2020 challenge unit discovery task. The proposed system consistently outperforms the baseline, demonstrating the usefulness of the method in learning representations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juliette Millet|AUTHOR Juliette Millet]], [[Ewan Dunbar|AUTHOR Ewan Dunbar]]
</p><p class="cpabstractcardaffiliationlist">LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4881–4885&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a data set and methods to compare speech processing models and human behaviour on a phone discrimination task. We provide Perceptimatic, an open data set which consists of French and English speech stimuli, as well as the results of 91 English- and 93 French-speaking listeners. The stimuli test a wide range of French and English contrasts, and are extracted directly from corpora of natural running read speech, used for the 2017 Zero Resource Speech Challenge. We provide a method to compare humans’ perceptual space with models’ representational space, and we apply it to models previously submitted to the Challenge. We show that, unlike unsupervised models and supervised multilingual models, a standard supervised monolingual HMM-GMM phone recognition system, while good at discriminating phones, yields a representational space very different from that of human native listeners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jonathan Clayton|AUTHOR Jonathan Clayton]], [[Scott Wellington|AUTHOR Scott Wellington]], [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]], [[Oliver Watts|AUTHOR Oliver Watts]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4886–4890&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate the use of a 14-channel, mobile EEG device in the decoding of heard, imagined, and articulated English phones from brainwave data. To this end we introduce a dataset that fills a current gap in the range of available open-access EEG datasets for speech processing with lightweight, affordable EEG devices made for the consumer market. We investigate the effectiveness of two classification models and a regression model for reconstructing spectral features of the original speech signal. We report that our classification performance is almost on a par with similar findings that use EEG data collected with research-grade devices. We conclude that commercial-grade devices can be used as speech-decoding BCIs with minimal signal processing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gurunath Reddy M.|AUTHOR Gurunath Reddy M.]], [[K. Sreenivasa Rao|AUTHOR K. Sreenivasa Rao]], [[Partha Pratim Das|AUTHOR Partha Pratim Das]]
</p><p class="cpabstractcardaffiliationlist">IIT Kharagpur, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4891–4895&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Electroglottography is a non-invasive technique to acquire the vocal folds activity across the larynx called EGG signal. The EGG is a clean signal free from vocal tract resonances, the parameters extracted from such a signal finds many applications in clinical and speech processing technology. In this paper, we propose a classification based approach to detect the significant parameter of the EGG such as glottal closure instant (GCI). We train deep convolutional neural networks (CNN) to predict if a frame of samples contain GCI location. Further, the GCI location within the frame is obtained by exploiting its unique manifestation from its first order derivative. We train several CNN models to determine the suitable input feature representation to efficiently detect the GCI location. Further, we train and evaluate the models on multiple speaker dataset to determine and eliminate any bias towards the speaker. We also show that the GCI identification rate can be improved significantly by the model trained with joint EGG and derivative (dEGG) signal. The deep models are trained with manually annotated GCI markers obtained from dEGG as reference. The objective evaluation measures confirmed that the proposed method is comparable and better than the traditional signal processing GCI detection methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hua Li|AUTHOR Hua Li]]^^1^^, [[Fei Chen|AUTHOR Fei Chen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shenzhen University, China; ^^2^^SUSTech, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4896–4900&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech synthesis system based on non-invasive brain-computer interface technology has the potential to restore communication abilities to patients with communication disorders. To this end, electroencephalogram (EEG) based speech imagery technology is fast evolving largely due to its advantages of simple implementation and low dependence on external stimuli. This work studied possible factors accounting for the classification accuracies of EEG-based imaginary Mandarin tones, which has significance to the development of BCI-based Mandarin speech synthesis system. Specially, a Mandarin tone imagery experiment was designed, and this work studied the effects of electrode configuration and tone cuing on accurately classifying four Mandarin tones from cortical EEG signals. Results showed that the involvement of more activated brain regions (i.e., Broca’s area, Wernicke’s area, and primary motor cortex) provided a more accurate classification of imaginary Mandarin tones than that of one specific region. At the tone cue stage, using audio-visual stimuli led to a much stronger and more separable activation of brain regions than using visual-only stimuli. In addition, the classification accuracies of tone 1 and tone 4 were significantly higher than those of tone 2 and tone 3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Benjamin van Niekerk|AUTHOR Benjamin van Niekerk]], [[Leanne Nortje|AUTHOR Leanne Nortje]], [[Herman Kamper|AUTHOR Herman Kamper]]
</p><p class="cpabstractcardaffiliationlist">Stellenbosch University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4836–4840&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we explore vector quantization for acoustic unit discovery. Leveraging unlabelled data, we aim to learn discrete representations of speech that separate phonetic content from speaker-specific details. We propose two neural models to tackle this challenge — both use vector quantization to map continuous features to a finite set of codes. The first model is a type of vector-quantized variational autoencoder (VQ-VAE). The VQ-VAE encodes speech into a sequence of discrete units before reconstructing the audio waveform. Our second model combines vector quantization with contrastive predictive coding (VQ-CPC). The idea is to learn a representation of speech by predicting future acoustic units. We evaluate the models on English and Indonesian data for the //ZeroSpeech 2020// challenge. In ABX phone discrimination tests, both models outperform all submissions to the 2019 and 2020 challenges, with a relative improvement of more than 30%. The models also perform competitively on a downstream voice conversion task. Of the two, VQ-CPC performs slightly better in general and is simpler and faster to train. Finally, probing experiments show that vector quantization is an effective bottleneck, forcing the models to discard speaker information.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karthik Pandia D.S.|AUTHOR Karthik Pandia D.S.]], [[Anusha Prakash|AUTHOR Anusha Prakash]], [[Mano Ranjith Kumar M.|AUTHOR Mano Ranjith Kumar M.]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]
</p><p class="cpabstractcardaffiliationlist">IIT Madras, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4841–4845&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A Spoken dialogue system for an unseen language is referred to as Zero resource speech. It is especially beneficial for developing applications for languages that have low digital resources. Zero resource speech synthesis is the task of building text-to-speech (TTS) models in the absence of transcriptions.

In this work, speech is modelled as a sequence of transient and steady-state acoustic units, and a unique set of acoustic units is discovered by iterative training. Using the acoustic unit sequence, TTS models are trained.

The main goal of this work is to improve the synthesis quality of a zero resource TTS system. Four different systems are proposed. All the systems consist of three stages — unit discovery, followed by unit sequence to spectrogram mapping, and finally spectrogram to speech inversion. Modifications are proposed to the spectrogram mapping stage. These modifications include training the mapping on voice data, using x-vectors to improve the mapping, two-stage learning, and gender-specific modelling. Evaluation of the proposed systems in the Zerospeech 2020 challenge shows that good quality synthesis can be achieved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Batuhan Gundogdu|AUTHOR Batuhan Gundogdu]], [[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Mansur Yesilbursa|AUTHOR Mansur Yesilbursa]], [[Murat Saraclar|AUTHOR Murat Saraclar]]
</p><p class="cpabstractcardaffiliationlist">Boğaziçi University, Turkey</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4846–4850&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A recent task posed by the Zerospeech challenge is the unsupervised learning of the basic acoustic units that exist in an unknown language. Previously, we introduced recurrent sparse autoencoders fine-tuned with corresponding speech segments obtained by unsupervised term discovery. There, the clustering was obtained on the intermediate layer where the nodes represent the acoustic unit assignments. In this paper, we extend this system by incorporating vector quantization and an adaptation of the winner-take-all networks. This way, symbol continuity could be enforced by excitatory and inhibitory weights along the temporal axis. Furthermore, in this work, we utilized the speaker information in a speaker adversarial training on the encoder. The ABX discriminability and the low bitrate results of our proposed approach on the Zerospeech 2020 challenge demonstrate the effect of the enhanced continuity of the encoding brought by the temporal-awareness and sparsity techniques proposed in this work.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4851–4855&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we report our submitted system for the ZeroSpeech 2020 challenge on Track 2019. The main theme in this challenge is to build a speech synthesizer without any textual information or phonetic labels. In order to tackle those challenges, we build a system that must address two major components such as 1) given speech audio, extract subword units in an unsupervised way and 2) re-synthesize the audio from novel speakers. The system also needs to balance the codebook performance between the ABX error rate and the bitrate compression rate. Our main contribution here is we proposed Transformer-based VQ-VAE for unsupervised unit discovery and Transformer-based inverter for the speech synthesis given the extracted codebook. Additionally, we also explored several regularization methods to improve performance even further.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takashi Morita|AUTHOR Takashi Morita]], [[Hiroki Koda|AUTHOR Hiroki Koda]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4856–4860&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we reported our exploration of Text-To-Speech without Text (TTS without T) in the Zero Resource Speech Challenge 2020, in which participants proposed an end-to-end, unsupervised system that learned speech recognition and TTS together. We addressed the challenge using biologically/psychologically motivated modules of Artificial Neural Networks (ANN), with a particular interest in unsupervised learning of human language as a biological/psychological problem. The system first processes Mel Frequency Cepstral Coefficient (MFCC) frames with an Echo-State Network (ESN), and simulates computations in cortical microcircuits. The outcome is discretized by our original Variational Autoencoder (VAE) that implements the Dirichlet-based Bayesian clustering widely accepted in computational linguistics and cognitive science. The discretized signal is then reverted into sound waveform via a neural-network implementation of the source-filter model for speech production.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4861–4865&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel approach of cyclic spectral modeling for unsupervised discovery of speech units into voice conversion with excitation network and waveform modeling. Specifically, we propose two spectral modeling techniques: 1) cyclic vector-quantized autoencoder (CycleVQVAE), and 2) cyclic variational autoencoder (CycleVAE). In CycleVQVAE, a discrete latent space is used for the speech units, whereas, in CycleVAE, a continuous latent space is used. The cyclic structure is developed using the reconstruction flow and the cyclic reconstruction flow of spectral features, where the latter is obtained by recycling the converted spectral features. This method is used to obtain a possible speaker-independent latent space because of marginalization on all possible speaker conversion pairs during training. On the other hand, speaker-dependent space is conditioned with a one-hot speaker-code. Excitation modeling is developed in a separate manner for CycleVQVAE, while it is in a joint manner for CycleVAE. To generate speech waveform, WaveNet-based waveform modeling is used. The proposed framework is entried for the ZeroSpeech Challenge 2020, and is capable of reaching a character error rate of 0.21, a speaker similarity score of 3.91, a mean opinion score of 3.84 for the naturalness of the converted speech in the 2019 voice conversion task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingjie Chen|AUTHOR Mingjie Chen]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4866–4870&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised representation learning of speech has been of keen interest in recent years, which is for example evident in the wide interest of the ZeroSpeech challenges. This work presents a new method for learning frame level representations based on WaveNet auto-encoders. Of particular interest in the ZeroSpeech Challenge 2019 were models with discrete latent variable such as the Vector Quantized Variational Auto-Encoder (VQVAE). However these models generate speech with relatively poor quality. In this work we aim to address this with two approaches: first WaveNet is used as the decoder and to generate waveform data directly from the latent representation; second, the low complexity of latent representations is improved with two alternative disentanglement learning methods, namely instance normalization and sliced vector quantization. The method was developed and tested in the context of the recent ZeroSpeech challenge 2020. The system output submitted to the challenge obtained the top position for naturalness (Mean Opinion Score 4.06), top position for intelligibility (Character Error Rate 0.15), and third position for the quality of the representation (ABX test score 12.5). These and further analysis in this paper illustrates that quality of the converted speech and the acoustic units representation can be well balanced.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Okko Räsänen|AUTHOR Okko Räsänen]], [[María Andrea Cruz Blandón|AUTHOR María Andrea Cruz Blandón]]
</p><p class="cpabstractcardaffiliationlist">Tampere University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4871–4875&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised spoken term discovery (UTD) aims at finding recurring segments of speech from a corpus of acoustic speech data. One potential approach to this problem is to use dynamic time warping (DTW) to find well-aligning patterns from the speech data. However, automatic selection of initial candidate segments for the DTW-alignment and detection of “sufficiently good” alignments among those require some type of predefined criteria, often operationalized as threshold parameters for pair-wise distance metrics between signal representations. In the existing UTD systems, the optimal hyperparameters may differ across datasets, limiting their applicability to new corpora and truly low-resource scenarios. In this paper, we propose a novel probabilistic approach to DTW-based UTD named as PDTW. In PDTW, distributional characteristics of the processed corpus are utilized for adaptive evaluation of alignment quality, thereby enabling systematic discovery of pattern pairs that have similarity what would be expected by coincidence. We test PDTW on Zero Resource Speech Challenge 2017 datasets as a part of 2020 implementation of the challenge. The results show that the system performs consistently on all five tested languages using fixed hyperparameters, clearly outperforming the earlier DTW-based system in terms of coverage of the detected patterns.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Johanes Effendi|AUTHOR Johanes Effendi]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4901–4905&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous research has proposed a machine speech chain to enable automatic speech recognition (ASR) and text-to-speech synthesis (TTS) to assist each other in semi-supervised learning and to avoid the need for a large amount of paired speech and text data. However, that framework still requires a large amount of unpaired (speech or text) data. A prototype multimodal machine chain was then explored to further reduce the need for a large amount of unpaired data, which could improve ASR or TTS even when no more speech or text data were available. Unfortunately, this framework relied on the image retrieval (IR) model, and thus it was limited to handling only those images that were already known during training. Furthermore, the performance of this framework was only investigated with single-speaker artificial speech data. In this study, we revamp the multimodal machine chain framework with image generation (IG) and investigate the possibility of augmenting image data for ASR and TTS using single-loop and dual-loop architectures on multispeaker natural speech data. Experimental results revealed that both single-loop and dual-loop multimodal chain frameworks enabled ASR and TTS to improve their performance using an image-only dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Łukasz Augustyniak|AUTHOR Łukasz Augustyniak]]^^1^^, [[Piotr Szymański|AUTHOR Piotr Szymański]]^^1^^, [[Mikołaj Morzy|AUTHOR Mikołaj Morzy]]^^2^^, [[Piotr Żelasko|AUTHOR Piotr Żelasko]]^^3^^, [[Adrian Szymczak|AUTHOR Adrian Szymczak]]^^1^^, [[Jan Mizgajski|AUTHOR Jan Mizgajski]]^^1^^, [[Yishay Carmiel|AUTHOR Yishay Carmiel]]^^1^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Avaya, USA; ^^2^^Poznan University of Technology, Poland; ^^3^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4906–4910&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic Speech Recognition (ASR) systems introduce word errors, which often confuse punctuation prediction models, turning punctuation restoration into a challenging task. These errors usually take the form of homophones (words which share exact or almost exact pronunciation but differ in meaning) and oronyms (homophones which consist of multiple words). We show how retrofitting of the word embeddings on the domain-specific data can mitigate ASR errors. Our main contribution is a method for a better alignment of homophone embeddings and the validation of the presented method on the punctuation prediction task. We record the absolute improvement in punctuation prediction accuracy between 6.2% (for question marks) to 9% (for periods) when compared with the state-of-the-art model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Monica Sunkara|AUTHOR Monica Sunkara]], [[Srikanth Ronanki|AUTHOR Srikanth Ronanki]], [[Dhanush Bekal|AUTHOR Dhanush Bekal]], [[Sravan Bodapati|AUTHOR Sravan Bodapati]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4911–4915&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we explore a multimodal semi-supervised learning approach for punctuation prediction by learning representations from large amounts of unlabelled audio and text data. Conventional approaches in speech processing typically use forced alignment to encoder per frame acoustic features to word level features and perform multimodal fusion of the resulting acoustic and lexical representations. As an alternative, we explore attention based multimodal fusion and compare its performance with forced alignment based fusion. Experiments conducted on the Fisher corpus show that our proposed approach achieves ~6–9% and ~3–4% absolute improvement (F1 score) over the baseline BLSTM model on reference transcripts and ASR outputs respectively. We further improve the model robustness to ASR errors by performing data augmentation with N-best lists which achieves up to an additional ~2–6% improvement on ASR outputs. We also demonstrate the effectiveness of semi-supervised learning approach by performing ablation study on various sizes of the corpus. When trained on 1 hour of speech and text data, the proposed model achieved ~9–18% absolute improvement over baseline model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruizhe Huang|AUTHOR Ruizhe Huang]]^^1^^, [[Ke Li|AUTHOR Ke Li]]^^1^^, [[Ashish Arora|AUTHOR Ashish Arora]]^^1^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^2^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4916–4920&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an efficient algorithm for n-gram language model adaptation under the minimum discrimination information (MDI) principle, where an out-of-domain language model is adapted to satisfy the constraints of marginal probabilities of the in-domain data. The challenge for MDI language model adaptation is its computational complexity. By taking advantage of the backoff structure of n-gram model and the idea of hierarchical training method, originally proposed for maximum entropy (ME) language models [1], we show that MDI adaptation can be computed in linear-time complexity to the inputs in each iteration. The complexity remains the same as ME models, although MDI is more general than ME. This makes MDI adaptation practical for large corpus and vocabulary. Experimental results confirm the scalability of our algorithm on large datasets, while MDI adaptation gets slightly worse perplexity but better word error rates compared to simple linear interpolation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cal Peyser|AUTHOR Cal Peyser]], [[Sepand Mavandadi|AUTHOR Sepand Mavandadi]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[James Apfel|AUTHOR James Apfel]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Shankar Kumar|AUTHOR Shankar Kumar]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4921–4925&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end (E2E) automatic speech recognition (ASR) systems lack the distinct language model (LM) component that characterizes traditional speech systems. While this simplifies the model architecture, it complicates the task of incorporating text-only data into training, which is important to the recognition of tail words that do not occur often in audio-text pairs. While shallow fusion has been proposed as a method for incorporating a pre-trained LM into an E2E model at inference time, it has not yet been explored for very large text corpora, and it has been shown to be very sensitive to hyperparameter settings in the beam search. In this work, we apply shallow fusion to incorporate a very large text corpus into a state-of-the-art E2E ASR model. We explore the impact of model size and show that intelligent pruning of the training set can be more effective than increasing the parameter count. Additionally, we show that incorporating the LM in minimum word error rate (MWER) fine tuning makes shallow fusion far less dependent on optimal hyperparameter settings, reducing the difficulty of that tuning problem.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Marc Delcroix|AUTHOR Marc Delcroix]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4926–4930&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To improve the performance of automatic speech recognition (ASR) for a specific domain, it is essential to train a language model (LM) using text data of the target domain. In this study, we propose a method to transfer the domain of a large amount of source data to the target domain and augment the data to train a target domain-specific LM. The proposed method consists of two steps, which use a bidirectional long short-term memory (BLSTM)-based word replacing model and a target domain-adapted LSTMLM, respectively. Based on the learned domain-specific wordings, the word replacing model converts a given source domain sentence to a confusion network (CN) that includes a variety of target domain candidate word sequences. Then, the LSTMLM selects a target domain sentence from the CN by evaluating its grammatical correctness based on decoding scores. In experiments using lecture and conversational speech corpora as the source and target domain data sets, we confirmed that the proposed LM data augmentation method improves the target conversational speech recognition performance of a hybrid ASR system using an n-gram LM and the performance of N-best rescoring using an LSTMLM.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Krzysztof Wołk|AUTHOR Krzysztof Wołk]]
</p><p class="cpabstractcardaffiliationlist">PJAIT, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4931–4935&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Language and vocabulary continue to evolve in this era of big data, making language modelling an important language processing task that benefits from the enormous data in different languages provided by web-based corpora. In this paper, we present a set of 6-gram language models based on a big-data training of the contemporary Polish language, using the Common Crawl corpus (a compilation of over 3.25 billion webpages) and other resources. The corpus is provided in different combinations of POS-tagged, grammatical groups-tagged, and sub-word-divided versions of raw corpora and trained models. The dictionary of contemporary Polish was updated and presented, and we used the KENLM toolkit to train big-data language models in ARPA format. Additionally, we have provided pre-trained vector models. The language model was trained, and the advances in BLEU score were obtained in MT systems along with the perplexity values, utilizing our models. The superiority of our model over Google’s WEB1T n-gram counts and the first version of our model was demonstrated through experiments, and the results illustrated that it guarantees improved quality in perplexity and machine translation. Our models can be applied in several natural language processing tasks and several scientific interdisciplinary fields.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prabhat Pandey|AUTHOR Prabhat Pandey]], [[Volker Leutnant|AUTHOR Volker Leutnant]], [[Simon Wiesler|AUTHOR Simon Wiesler]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Daniel Willett|AUTHOR Daniel Willett]]
</p><p class="cpabstractcardaffiliationlist">Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4936–4940&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Traditional hybrid speech recognition systems use a fixed vocabulary for recognition, which is a challenge for agglutinative and compounding languages due to the presence of large number of rare words. This causes high out-of-vocabulary rate and leads to poor probability estimates for rare words. It is also important to keep the vocabulary size in check for a low-latency WFST-based speech recognition system. Previous works have addressed this problem by utilizing subword units in the language model training and merging them back to reconstruct words in the post-processing step. In this paper, we extend such open vocabulary approaches by focusing on compounding aspect. We present a data-driven unsupervised method to identify compound words in the vocabulary and learn rules to segment them. We show that compound modeling can achieve 3% to 8% relative reduction in word error rate and up to 9% reduction in the vocabulary size compared to word-based models. We also show the importance of consistency between the lexicon employed during decoding and acoustic model training for subword-based systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Simone Wills|AUTHOR Simone Wills]], [[Pieter Uys|AUTHOR Pieter Uys]], [[Charl van Heerden|AUTHOR Charl van Heerden]], [[Etienne Barnard|AUTHOR Etienne Barnard]]
</p><p class="cpabstractcardaffiliationlist">Saigen, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4941–4945&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Different language modeling approaches are evaluated on two under-resourced, agglutinative, South African languages; Sesotho and isiZulu. The two languages present different challenges to language modeling based on their respective orthographies; isiZulu is conjunctively written whereas Sotho is disjunctively written. Two subword modeling approaches are evaluated and shown to be useful to reduce the OOV rate for isiZulu, and for Sesotho, a multi-word approach is evaluated for improving ASR accuracy, with limited success. RNNs are also evaluated and shown to slightly improve ASR accuracy, despite relatively small text corpora.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing Han|AUTHOR Jing Han]]^^1^^, [[Kun Qian|AUTHOR Kun Qian]]^^2^^, [[Meishu Song|AUTHOR Meishu Song]]^^1^^, [[Zijiang Yang|AUTHOR Zijiang Yang]]^^1^^, [[Zhao Ren|AUTHOR Zhao Ren]]^^1^^, [[Shuo Liu|AUTHOR Shuo Liu]]^^1^^, [[Juan Liu|AUTHOR Juan Liu]]^^3^^, [[Huaiyuan Zheng|AUTHOR Huaiyuan Zheng]]^^3^^, [[Wei Ji|AUTHOR Wei Ji]]^^3^^, [[Tomoya Koike|AUTHOR Tomoya Koike]]^^2^^, [[Xiao Li|AUTHOR Xiao Li]]^^4^^, [[Zixing Zhang|AUTHOR Zixing Zhang]]^^5^^, [[Yoshiharu Yamamoto|AUTHOR Yoshiharu Yamamoto]]^^2^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^University of Tokyo, Japan; ^^3^^HUST, China; ^^4^^CHCMU, China; ^^5^^Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4946–4950&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The COVID-19 outbreak was announced as a global pandemic by the World Health Organisation in March 2020 and has affected a growing number of people in the past few weeks. In this context, advanced artificial intelligence techniques are brought to the fore in responding to fight against and reduce the impact of this global health crisis. In this study, we focus on developing some potential use-cases of intelligent speech analysis for COVID-19 diagnosed patients. In particular, by analysing speech recordings from these patients, we construct audio-only-based models to automatically categorise the health state of patients from four aspects, including the severity of illness, sleep quality, fatigue, and anxiety. For this purpose, two established acoustic feature sets and support vector machines are utilised. Our experiments show that an average accuracy of .69 obtained estimating the severity of illness, which is derived from the number of days in hospitalisation. We hope that this study can foster an extremely fast, low-cost, and convenient way to automatically detect the COVID-19 disease.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ina Kodrasi|AUTHOR Ina Kodrasi]]^^1^^, [[Michaela Pernon|AUTHOR Michaela Pernon]]^^2^^, [[Marina Laganaro|AUTHOR Marina Laganaro]]^^2^^, [[Hervé Bourlard|AUTHOR Hervé Bourlard]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Idiap Research Institute, Switzerland; ^^2^^Université de Genève, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4991–4995&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To assist clinicians in the differential diagnosis and treatment of motor speech disorders, it is imperative to establish objective tools which can reliably characterize different subtypes of disorders such as apraxia of speech (AoS) and dysarthria. Objective tools in the context of speech disorders typically rely on thousands of acoustic features, which raises the risk of difficulties in the interpretation of the underlying mechanisms, over-adaptation to training data, and weak generalization capabilities to test data. Seeking to use a small number of acoustic features and motivated by the clinical-perceptual signs used for the differential diagnosis of AoS and dysarthria, we propose to characterize differences between AoS and dysarthria using only six handcrafted acoustic features, with three features reflecting segmental distortions, two features reflecting loudness and hypernasality, and one feature reflecting syllabification. These three different sets of features are used to separately train three classifiers. At test time, the decisions of the three classifiers are combined through a simple majority voting scheme. Preliminary results show that the proposed approach achieves a discrimination accuracy of 90%, outperforming using state-of-the-art features such as openSMILE which yield a discrimination accuracy of 65%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alice Baird|AUTHOR Alice Baird]]^^1^^, [[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^1^^, [[Sebastian Schnieder|AUTHOR Sebastian Schnieder]]^^2^^, [[Jarek Krajewski|AUTHOR Jarek Krajewski]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^HMKW, Germany; ^^3^^IXP, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4951–4955&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The current level of global uncertainty is having an implicit effect on those with a diagnosed anxiety disorder. Anxiety can impact vocal qualities, particularly as physical symptoms of anxiety include muscle tension and shortness of breath. To this end, in this study, we explore the effect of anxiety on speech — focusing on four classes of sustained vowels (//sad, smiling, comfortable,// and //powerful//) — via feature analysis and a series of regression experiments. We extract three well-known acoustic feature sets and evaluate the efficacy of machine learning for prediction of anxiety based on the Beck Anxiety Inventory (BAI) score. Of note, utilising a support vector regressor, we find that the effects of anxiety in speech appear to be stronger at higher BAI levels. Significant differences (p < 0.05) between test predictions of //Low// and //High-BAI// groupings support this. Furthermore, when utilising a //High-BAI// grouping for the prediction of standardised BAI, significantly higher results are obtained for //smiling// sustained vowels, of up to 0.646 Spearman’s Correlation Coefficient (ρ), and up to 0.592 ρ with all sustained vowels. A significantly stronger (Cohens d of 1.718) result than all data combined without grouping, which achieves at best 0.234 ρ.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziping Zhao|AUTHOR Ziping Zhao]]^^1^^, [[Qifei Li|AUTHOR Qifei Li]]^^1^^, [[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^2^^, [[Bin Liu|AUTHOR Bin Liu]]^^3^^, [[Haishuai Wang|AUTHOR Haishuai Wang]]^^4^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin Normal University, China; ^^2^^Universität Augsburg, Germany; ^^3^^CAS, China; ^^4^^Fairfield University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4956–4960&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A fast-growing area of mental health research is the search for speech-based objective markers for conditions such as depression. One vital challenge in the development of speech-based depression severity assessment systems is the extraction of depression-relevant features from speech signals. In order to deliver more comprehensive feature representation, we herein explore the benefits of a hybrid network that encodes depression-related characteristics in speech for the task of depression severity assessment. The proposed network leverages self-attention networks (SAN) trained on low-level acoustic features and deep convolutional neural networks (DCNN) trained on 3D Log-Mel spectrograms. The feature representations learnt in the SAN and DCNN are concatenated and average pooling is exploited to aggregate complementary segment-level features. Finally, support vector regression is applied to predict a speaker’s Beck Depression Inventory-II score. Experiments based on a subset of the Audio-Visual Depressive Language Corpus, as used in the 2013 and 2014 Audio/Visual Emotion Challenges, demonstrate the effectiveness of our proposed hybrid approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yilin Pan|AUTHOR Yilin Pan]]^^1^^, [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]]^^1^^, [[Markus Reuber|AUTHOR Markus Reuber]]^^2^^, [[Annalena Venneri|AUTHOR Annalena Venneri]]^^1^^, [[Daniel Blackburn|AUTHOR Daniel Blackburn]]^^1^^, [[Heidi Christensen|AUTHOR Heidi Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Sheffield, UK; ^^2^^University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4961–4965&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech and language based automatic dementia detection is of interest due to it being non-invasive, low-cost and potentially able to aid diagnosis accuracy. The collected data are mostly audio recordings of spoken language and these can be used directly for acoustic-based analysis. To extract linguistic-based information, an automatic speech recognition (ASR) system is used to generate transcriptions. However, the extraction of reliable acoustic features is difficult when the acoustic quality of the data is poor as is the case with DementiaBank, the largest opensource dataset for Alzheimer’s Disease classification. In this paper, we explore how to improve the robustness of the acoustic feature extraction by using time alignment information and confidence scores from the ASR system to identify audio segments of good quality. In addition, we design rhythm-inspired features and combine them with acoustic features. By classifying the combined features with a bidirectional-LSTM attention network, the F-measure improves from 62.15% to 70.75% when only the high-quality segments are used. Finally, we apply the same approach to our previously proposed hierarchical-based network using linguistic-based features and show improvement from 74.37% to 77.25%. By combining the acoustic and linguistic systems, a state-of-the-art 78.34% F-measure is achieved on the DementiaBank task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amrit Romana|AUTHOR Amrit Romana]]^^1^^, [[John Bandon|AUTHOR John Bandon]]^^1^^, [[Noelle Carlozzi|AUTHOR Noelle Carlozzi]]^^1^^, [[Angela Roberts|AUTHOR Angela Roberts]]^^2^^, [[Emily Mower Provost|AUTHOR Emily Mower Provost]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Michigan, USA; ^^2^^Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4966–4970&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Huntington disease (HD) is a fatal autosomal dominant neurocognitive disorder that causes cognitive disturbances, neuropsychiatric symptoms, and impaired motor abilities (e.g., gait, speech, voice). Due to its progressive nature, HD treatment requires ongoing clinical monitoring of symptoms. Individuals with the Huntington gene mutation, which causes HD, may exhibit a range of speech symptoms as they progress from premanifest to manifest HD. Speech-based passive monitoring has the potential to augment clinical information by more continuously tracking manifestation symptoms. Differentiating between premanifest and manifest HD is an important yet understudied problem, as this distinction marks the need for increased treatment. In this work we present the first demonstration of how changes in speech can be measured to differentiate between premanifest and manifest HD. To do so, we focus on one speech symptom of HD: distorted vowels. We introduce a set of Filtered Vowel Distortion Measures (FVDM) which we extract from read speech. We show that FVDM, coupled with features from existing literature, can differentiate between premanifest and manifest HD with 80% accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sudarsana Reddy Kadiri|AUTHOR Sudarsana Reddy Kadiri]]^^1^^, [[Rashmi Kethireddy|AUTHOR Rashmi Kethireddy]]^^2^^, [[Paavo Alku|AUTHOR Paavo Alku]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4971–4975&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Parkinson’s disease (PD) is a progressive deterioration of the human central nervous system. Detection of PD (discriminating patients with PD from healthy subjects) from speech is a useful approach due to its non-invasive nature. This study proposes to use novel cepstral coefficients derived from the single frequency filtering (SFF) method, called as single frequency filtering cepstral coefficients (SFFCCs) for the detection of PD. SFF has been shown to provide higher spectro-temporal resolution compared to the short-time Fourier transform. The current study uses the PC-GITA database, which consists of speech from speakers with PD and healthy controls (50 males, 50 females). Our proposed detection system is based on the i-vectors derived from SFFCCs using SVM as a classifier. In the detection of PD, better performance was achieved when the i-vectors were computed from the proposed SFFCCs compared to the popular conventional MFCCs. Furthermore, we investigated the effect of temporal variations by deriving the shifted delta cepstral (SDC) coefficients using SFFCCs. These experiments revealed that the i-vectors derived from the proposed SFFCCs+SDC features gave an absolute improvement of 9% compared to the i-vectors derived from the baseline MFCCs+SDC features, indicating the importance of temporal variations in the detection of PD.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sebastião Quintas|AUTHOR Sebastião Quintas]]^^1^^, [[Julie Mauclair|AUTHOR Julie Mauclair]]^^1^^, [[Virginie Woisard|AUTHOR Virginie Woisard]]^^2^^, [[Julien Pinquier|AUTHOR Julien Pinquier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IRIT (UMR 5505), France; ^^2^^CHU de Toulouse, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4976–4980&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the context of pathological speech, perceptual evaluation is still the most widely used method for intelligibility estimation. Despite being considered a staple in clinical settings, it has a well-known subjectivity associated with it, which results in greater variances and low reproducibility. On the other hand, due to the increasing computing power and latest research, automatic evaluation has become a growing alternative to perceptual assessments. In this paper we investigate an automatic prediction of speech intelligibility using the //x-vector// paradigm, in the context of head and neck cancer. Experimental evaluation of the proposed model suggests a high correlation rate when applied to our corpus of HNC patients (p = 0.85). Our approach also displayed the possibility of achieving very high correlation values (p = 0.95) when adapting the evaluation to each individual speaker, displaying a significantly more accurate prediction whilst using smaller amounts of data. These results can also provide valuable insight to the redevelopment of test protocols, which typically tend to be substantial and effort-intensive for patients.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ajish K. Abraham|AUTHOR Ajish K. Abraham]]^^1^^, [[M. Pushpavathi|AUTHOR M. Pushpavathi]]^^1^^, [[N. Sreedevi|AUTHOR N. Sreedevi]]^^1^^, [[A. Navya|AUTHOR A. Navya]]^^1^^, [[C.M. Vikram|AUTHOR C.M. Vikram]]^^2^^, [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^AIISH, India; ^^2^^IIT Guwahati, India; ^^3^^IIT Dharwad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4981–4985&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech development in children with hearing impairment (CHI) is hampered by inadequate auditory input. Speech of CHI has reduced intelligibility compared to typically developing children (TDC), mainly because of articulatory errors. Speech language pathologists (SLPs) assess these errors through perceptual evaluation and accordingly device the protocol to correct them through several sessions of speech therapy. Automatic methods need to be developed to reduce the time and enhance the accuracy of assessment. Acoustic measures of plosives may be utilized as valuable cues for automatic assessment.

The current study was aimed to investigate the burst duration and spectral moments (centroid, skewness and kurtosis) of plosives in CHI in comparison with TDC. 24 children in the age range of 5 to 8 years, divided into group I (13 TDC) and group II (11 CHI), participated. Six words in Hindi embedded with plosives (/p/, /b/, /ʈ/, /ɖ/, /k/, /ɡ/) in the initial position were used as speech material.

Burst duration, spectral centroid and skewness were found to be significantly different across the groups for most of the plosives, whereas kurtosis was not. Results indicate that these measures except kurtosis are potential cues for automatic assessment of articulatory errors.</p></div>
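As a pointer to how the spectral-moment cues above are typically computed, the following sketch derives the spectral centroid, skewness and kurtosis of a burst segment from its magnitude spectrum. The windowing choice and the random 10 ms "burst" are assumptions for illustration; the study's exact analysis settings may differ.

```python
import numpy as np

def spectral_moments(signal, sr):
    """Spectral centroid, skewness and kurtosis of a (burst) segment, computed from
    the magnitude spectrum treated as a probability distribution over frequency."""
    spectrum = np.abs(np.fft.rfft(signal * np.hanning(len(signal))))
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / sr)
    p = spectrum / spectrum.sum()
    centroid = np.sum(freqs * p)
    spread = np.sqrt(np.sum(((freqs - centroid) ** 2) * p))
    skewness = np.sum(((freqs - centroid) ** 3) * p) / spread ** 3
    kurtosis = np.sum(((freqs - centroid) ** 4) * p) / spread ** 4
    return centroid, skewness, kurtosis

sr = 16000
burst = np.random.randn(int(0.01 * sr))   # a hypothetical 10 ms burst segment
print(spectral_moments(burst, sr))
```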
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matthew Perez|AUTHOR Matthew Perez]], [[Zakaria Aldeneh|AUTHOR Zakaria Aldeneh]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]
</p><p class="cpabstractcardaffiliationlist">University of Michigan, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4986–4990&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Robust speech recognition is a key prerequisite for semantic feature extraction in automatic aphasic speech analysis. However, standard one-size-fits-all automatic speech recognition models perform poorly when applied to aphasic speech. One reason for this is the wide range of speech intelligibility due to different levels of severity (i.e., higher severity lends itself to less intelligible speech). To address this, we propose a novel acoustic model based on a mixture of experts (MoE), which handles the varying intelligibility stages present in aphasic speech by explicitly defining severity-based experts. At test time, the contribution of each expert is decided by estimating speech intelligibility with a speech intelligibility detector (SID). We show that our proposed approach significantly reduces phone error rates across all severity stages in aphasic speech compared to a baseline approach that does not incorporate severity information into the modeling process.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shamane Siriwardhana|AUTHOR Shamane Siriwardhana]], [[Andrew Reis|AUTHOR Andrew Reis]], [[Rivindu Weerasekera|AUTHOR Rivindu Weerasekera]], [[Suranga Nanayakkara|AUTHOR Suranga Nanayakkara]]
</p><p class="cpabstractcardaffiliationlist">University of Auckland, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3755–3759&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multimodal emotion recognition from the speech is an important area in affective computing. Fusing multiple data modalities and learning representations with limited amounts of labeled data is a challenging task. In this paper, we explore the use of modality specific “BERT-like” pretrained Self Supervised Learning (SSL) architectures to represent both speech and text modalities for the task of multimodal speech emotion recognition. By conducting experiments on three publicly available datasets (IEMOCAP, CMU-MOSEI, and CMU-MOSI), we show that jointly fine-tuning “BERT-like” SSL architectures achieve state-of-the-art (SOTA) results. We also evaluate two methods of fusing speech and text modalities and show that a simple fusion mechanism can outperform more complex ones when using SSL models that have similar architectural properties to BERT.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-An Chung|AUTHOR Yu-An Chung]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3760–3764&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Autoregressive Predictive Coding (APC), as a self-supervised objective, has enjoyed success in learning representations from large amounts of unlabeled data, and the learned representations are rich for many downstream tasks. However, the connection between low self-supervised loss and strong performance in downstream tasks remains unclear. In this work, we propose Vector-Quantized Autoregressive Predictive Coding (VQ-APC), a novel model that produces quantized representations, allowing us to explicitly control the amount of information encoded in the representations. By studying a sequence of increasingly limited models, we reveal the constituents of the learned representations. In particular, we confirm the presence of information with probing tasks, while showing the //absence// of information with mutual information, uncovering the model’s preference in preserving speech information as its capacity becomes constrained. We find that there exists a point where phonetic and speaker information are amplified to maximize a self-supervised objective. As a byproduct, the learned codes for a particular model capacity correspond well to English phones.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xingchen Song|AUTHOR Xingchen Song]]^^1^^, [[Guangsen Wang|AUTHOR Guangsen Wang]]^^2^^, [[Yiheng Huang|AUTHOR Yiheng Huang]]^^3^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Dan Su|AUTHOR Dan Su]]^^3^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Salesforce, Singapore; ^^3^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3765–3769&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Self-attention network (SAN) can benefit significantly from the bi-directional representation learning through unsupervised pre-training paradigms such as BERT and XLNet. In this paper, we present an XLNet-like pretraining scheme “Speech-XLNet” to learn speech representations with self-attention networks (SANs). Firstly, we find that by shuffling the speech frame orders, Speech-XLNet serves as a strong regularizer which encourages the SAN network to make inferences by focusing on global structures through its attention weights. Secondly, Speech-XLNet also allows the model to explore bi-directional context information while maintaining the autoregressive training manner. Visualization results show that our approach can generalize better with more flattened and widely distributed optimas compared to the conventional approach. Experimental results on TIMIT demonstrate that Speech-XLNet greatly improves hybrid SAN/HMM in terms of both convergence speed and recognition accuracy. Our best systems achieve a relative improvement of 15.2% on the TIMIT task. Besides, we also apply our pretrained model to an End-to-End SAN with WSJ dataset and WER is reduced by up to 68% when only a few hours of transcribed data is used.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kritika Singh|AUTHOR Kritika Singh]], [[Vimal Manohar|AUTHOR Vimal Manohar]], [[Alex Xiao|AUTHOR Alex Xiao]], [[Sergey Edunov|AUTHOR Sergey Edunov]], [[Ross Girshick|AUTHOR Ross Girshick]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Christian Fuegen|AUTHOR Christian Fuegen]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]], [[Abdelrahman Mohamed|AUTHOR Abdelrahman Mohamed]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3770–3774&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many semi- and weakly-supervised approaches have been investigated for overcoming the labeling cost of building high-quality speech recognition systems. On the challenging task of transcribing social media videos in low-resource conditions, we conduct a large scale systematic comparison between two self-labeling methods on one hand, and weakly-supervised pretraining using contextual metadata on the other. We investigate distillation methods at the frame level and the sequence level for hybrid, encoder-only Connectionist Temporal Classification (CTC) based, and encoder-decoder speech recognition systems on Dutch and Romanian languages using 27,000 and 58,000 hours of unlabeled audio respectively. Although all approaches improved upon their respective baseline word error rates (WER) by more than 8%, sequence-level distillation for encoder-decoder models provided the largest relative WER reduction of 20% compared to the strongest data-augmented supervised baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kenichi Kumatani|AUTHOR Kenichi Kumatani]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Michael Zeng|AUTHOR Michael Zeng]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3775–3779&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we develop new self-learning techniques with an attention-based sequence-to-sequence (seq2seq) model for automatic speech recognition (ASR). For untranscribed speech data, the hypothesis from an ASR system must be used as a label. However, the imperfect ASR result makes unsupervised learning difficult to consistently improve recognition performance especially in the case that multiple powerful teacher models are unavailable. In contrast to conventional unsupervised learning approaches, we adopt the //multi-task learning// (MTL) framework where the n-th best ASR hypothesis is used as the label of each task. The seq2seq network is updated through the MTL framework so as to find the common representation that can cover multiple hypotheses. By doing so, the effect of the //hard-decision// errors can be alleviated. We first demonstrate the effectiveness of our self-learning methods through ASR experiments in an accent adaptation task between the US and British English speech. Our experiment results show that our method can reduce the WER on the British speech data from 14.55% to 10.36% compared to the baseline model trained with the US English data only. Moreover, we investigate the effect of our proposed methods in a federated learning scenario.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haibin Wu|AUTHOR Haibin Wu]], [[Andy T. Liu|AUTHOR Andy T. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3780–3784&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>High-performance anti-spoofing models for automatic speaker verification (ASV), have been widely used to protect ASV by identifying and filtering spoofing audio that is deliberately generated by text-to-speech, voice conversion, audio replay, etc. However, it has been shown that high-performance anti-spoofing models are vulnerable to adversarial attacks. Adversarial attacks, that are indistinguishable from original data but result in the incorrect predictions, are dangerous for anti-spoofing models and not in dispute we should detect them at any cost. To explore this issue, we proposed to employ Mockingjay, a self-supervised learning based model, to protect anti-spoofing models against adversarial attacks in the black-box scenario. Self-supervised learning models are effective in improving downstream task performance like phone classification or ASR. However, their effect in defense for adversarial attacks has not been explored yet. In this work, we explore the robustness of self-supervised learned high-level representations by using them in the defense against adversarial attacks. A layerwise noise-to-signal ratio (LNSR) is proposed to quantize and measure the effectiveness of deep models in countering adversarial noise. Experimental results on the ASVspoof 2019 dataset demonstrate that high-level representations extracted by Mockingjay can prevent the transferability of adversarial examples, and successfully counter black-box attacks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shu-wen Yang|AUTHOR Shu-wen Yang]], [[Andy T. Liu|AUTHOR Andy T. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3785–3789&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Self-supervised Audio Transformers (SAT) enable great success in many downstream speech applications like ASR, but how they work has not been widely explored yet. In this work, we present multiple strategies for the analysis of attention mechanisms in SAT. We categorize attentions into explainable categories, where we discover each category possesses its own unique functionality. We provide a visualization tool for understanding multi-head self-attention, importance ranking strategies for identifying critical attention, and attention refinement techniques to improve model performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sameer Khurana|AUTHOR Sameer Khurana]]^^1^^, [[Antoine Laurent|AUTHOR Antoine Laurent]]^^2^^, [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]]^^1^^, [[Jan Chorowski|AUTHOR Jan Chorowski]]^^3^^, [[Adrian Lancucki|AUTHOR Adrian Lancucki]]^^4^^, [[Ricard Marxer|AUTHOR Ricard Marxer]]^^5^^, [[James Glass|AUTHOR James Glass]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^LIUM (EA 4023), France; ^^3^^University of Wrocław, Poland; ^^4^^NVIDIA, Poland; ^^5^^LIS (UMR 7020), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3790–3794&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Probabilistic Latent Variable Models (LVMs) provide an alternative to self-supervised learning approaches for linguistic representation learning from speech. LVMs admit an intuitive probabilistic interpretation where the latent structure shapes the information extracted from the signal. Even though LVMs have recently seen a renewed interest due to the introduction of Variational Autoencoders (VAEs), their use for speech representation learning remains largely unexplored. In this work, we propose Convolutional Deep Markov Model (ConvDMM), a Gaussian state-space model with non-linear emission and transition functions modelled by deep neural networks. This unsupervised model is trained using black box variational inference. A deep convolutional neural network is used as an inference network for structured variational approximation. When trained on a large scale speech dataset (LibriSpeech), ConvDMM produces features that significantly outperform multiple self-supervised feature extracting methods on linear phone classification and recognition on the Wall Street Journal dataset. Furthermore, we found that ConvDMM complements self-supervised methods like Wav2Vec and PASE, improving on the results achieved with any of the methods alone. Lastly, we find that ConvDMM features enable learning better phone recognizers than any other features in an extreme low-resource regime with few labelled training examples.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ayimunishagu Abulimiti|AUTHOR Ayimunishagu Abulimiti]], [[Jochen Weiner|AUTHOR Jochen Weiner]], [[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3795–3799&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The //Interdisciplinary Longitudinal Study on Adult Development and Aging// (ILSE) was initiated with the aim to investigate satisfying and healthy aging. Over 20 years, about 4200 hours of biographic interviews from more than 1,000 participants were recorded. Spoken language is a strong indicator for declining cognitive resources, as it is affected in early stage. Hence, various research topics related to aging like dementia, could be analyzed based on data such as the ILSE interviews. The analysis of language capabilities requires transcribed speech. Since manual transcriptions are time and cost consuming, we aim to automatically transcribing the ILSE data using Automatic Speech Recognition (ASR). The recognition of ILSE interviews is very demanding due to the combination of various challenges: 20 year old analog two-speaker one-channel recordings of low signal quality, emotional and personal interviews between doctor and participant, and repeated recordings of aging, partly fragile individuals. In this study, we describe ongoing work to develop hybrid Hidden Markov Model (HMM)- Deep Neural Network (DNN) based ASR system for the ILSE corpus. So far, the best ASR system is obtained by second-pass decoding of a hybrid HMM-DNN model using recurrent neural network based language models with a word error rate of 50.39%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^1^^, [[Xiaohai Tian|AUTHOR Xiaohai Tian]]^^1^^, [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]^^2^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^University of Eastern Finland, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4213–4217&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Security of automatic speaker verification (ASV) systems is compromised by various spoofing attacks. While many types of //non-proactive// attacks (and their defenses) have been studied in the past, //attacker’s// perspective on ASV, represents a far less explored direction. It can potentially help to identify the weakest parts of ASV systems and be used to develop attacker-aware systems. We present an overview on this emerging research area by focusing on potential threats of adversarial attacks on ASV, spoofing countermeasures, or both. We conclude the study with discussion on selected attacks and leveraging from such knowledge to improve defense mechanisms against adversarial attacks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexey Sholokhov|AUTHOR Alexey Sholokhov]]^^1^^, [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]^^2^^, [[Ville Vestman|AUTHOR Ville Vestman]]^^2^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Huawei Technologies, Russia; ^^2^^University of Eastern Finland, Finland; ^^3^^NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4218–4222&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speaker verification (ASV) vendors and corpus providers would both benefit from tools to reliably extrapolate performance metrics for large speaker populations //without collecting new speakers//. We address false alarm rate extrapolation under a worst-case model whereby an adversary identifies the closest impostor for a given target speaker from a large population. Our models are generative and allow sampling new speakers. The models are formulated in the ASV detection score space to facilitate analysis of arbitrary ASV systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziyue Jiang|AUTHOR Ziyue Jiang]]^^1^^, [[Hongcheng Zhu|AUTHOR Hongcheng Zhu]]^^1^^, [[Li Peng|AUTHOR Li Peng]]^^1^^, [[Wenbing Ding|AUTHOR Wenbing Ding]]^^1^^, [[Yanzhen Ren|AUTHOR Yanzhen Ren]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Wuhan University, China; ^^2^^Ministry of Education, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4223–4227&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the development of deep generation technology, spoofing audio technology based on speech synthesis and speech conversion is closer to reality, which challenges the credibility of the media in social networks. This paper proposes a self-supervised spoofing audio detection scheme(SSAD). In SSAD, eight convolutional blocks are used to capture the local feature of the audio signal. The temporal convolutional network (TCN) is used to capture the context features and realize the operation in parallel. Three regression workers and one binary worker are designed to achieve better performance in fake and spoofing audio detection. The experimental results on ASVspoof 2019 dataset show that the detection accuracy of SSAD outperforms the state-of-art. It shows that the self-supervised method is effective for the task of spoofing audio detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qing Wang|AUTHOR Qing Wang]]^^1^^, [[Pengcheng Guo|AUTHOR Pengcheng Guo]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4228–4232&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition is a popular topic in biometric authentication and many deep learning approaches have achieved extraordinary performances. However, it has been shown in both image and speech applications that deep neural networks are vulnerable to adversarial examples. In this study, we aim to exploit this weakness to perform targeted adversarial attacks against the x-vector based speaker recognition system. We propose to generate inaudible adversarial perturbations based on the psychoacoustic principle of frequency masking, achieving targeted white-box attacks to speaker recognition system. Specifically, we constrict the perturbation under the masking threshold of original audio, instead of using a common l,,p,, norm to measure the perturbations. Experiments on Aishell-1 corpus show that our approach yields up to 98.5% attack success rate to arbitrary gender speaker targets, while retaining indistinguishable attribute to listeners. Furthermore, we also achieve an effective speaker attack when applying the proposed approach to a completely irrelevant waveform, such as music.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jesús Villalba|AUTHOR Jesús Villalba]], [[Yuekai Zhang|AUTHOR Yuekai Zhang]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4233–4237&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic Speaker Verification (ASV) enables high-security applications like user authentication or criminal investigation. However, ASV can be subjected to malicious attacks, which could compromise that security. The ASV literature mainly studies spoofing (a.k.a impersonation) attacks such as voice replay, synthesis or conversion. Meanwhile, other kinds of attacks, known as adversarial attacks, have become a threat to all kind of machine learning systems. Adversarial attacks introduce an imperceptible perturbation in the input signal that radically changes the behavior of the system. These attacks have been intensively studied in the image domain but less in the speech domain.

In this work, we investigate the vulnerability of state-of-the-art ASV systems to adversarial attacks. We consider a threat model consisting of adding perturbation noise to the test waveform to alter the ASV decision. We also discuss the methodology and metrics to benchmark adversarial attacks and defenses in ASV. We evaluated three x-vector architectures, which performed among the best in recent ASV evaluations, against fast gradient sign and Carlini-Wagner attacks. All networks were highly vulnerable in the white-box attack scenario, even for high SNR (30–60 dB). Furthermore, we successfully transferred attacks generated with smaller white-box networks to attack a larger black-box network.</p></div>
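For reference, the fast gradient sign method mentioned above is a one-step white-box attack on the input waveform. The sketch below applies it to a toy differentiable scorer standing in for an x-vector network; the model, input size and epsilon are placeholders, not the evaluated systems.

```python
import torch

def fgsm_attack(model, waveform, target_label, epsilon):
    """Fast gradient sign method: one gradient step on the input waveform that
    maximally increases the loss, under an L-infinity budget epsilon."""
    waveform = waveform.clone().detach().requires_grad_(True)
    loss = torch.nn.functional.cross_entropy(model(waveform), target_label)
    loss.backward()
    return (waveform + epsilon * waveform.grad.sign()).detach()

# Toy stand-in for a speaker scorer over 16000 samples and 2 classes.
model = torch.nn.Sequential(torch.nn.Linear(16000, 64), torch.nn.ReLU(), torch.nn.Linear(64, 2))
x = torch.randn(1, 16000)
x_adv = fgsm_attack(model, x, torch.tensor([1]), epsilon=0.001)
print((x_adv - x).abs().max())   # perturbation stays within epsilon
```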
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuekai Zhang|AUTHOR Yuekai Zhang]], [[Ziyan Jiang|AUTHOR Ziyan Jiang]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4238–4242&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoofing countermeasure systems protect Automatic Speaker Verification (ASV) systems from spoofing attacks such as replay, synthesis, and conversion. However, research has shown spoofing countermeasures are vulnerable to adversarial attacks. Previous literature mainly uses adversarial attacks on spoofing countermeasures under a white-box scenario, where attackers could access all the information of the victim networks. Blackbox attacks would be a more serious threat than white-box attacks.

In this paper, our objective is to black-box attack spoofing countermeasures using adversarial examples with high transferability. We used MI-FGSM to improve the transferability of adversarial examples. We propose an iterative ensemble method (IEM) to further improve the transferability. Compared with previous ensemble-based attacks, our proposed IEM method, combined with MI-FGSM, could effectively generate adversarial examples with higher transferability. In our experiments, we evaluated the attacks on four black-box networks. For each black-box model, we used the other three as a white-box ensemble to generate the adversarial examples. The proposed IEM with MI-FGSM improved the attack success rate by 4–30% relative (depending on the black-box model) w.r.t. the baseline logit ensemble. Therefore, we conclude that spoofing countermeasure models are also vulnerable to black-box attacks.</p></div>
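To make the baseline concrete, the sketch below implements MI-FGSM against a simple logit ensemble of white-box stand-in models; the proposed IEM refinement is not reproduced here. All models, dimensions and step sizes are hypothetical.

```python
import torch

def mi_fgsm_ensemble(models, x, y, epsilon=0.002, alpha=0.0005, steps=10, mu=1.0):
    """Momentum iterative FGSM against a logit ensemble of white-box models,
    commonly used to craft transferable adversarial examples for a black-box target."""
    x_adv, g = x.clone().detach(), torch.zeros_like(x)
    for _ in range(steps):
        x_adv.requires_grad_(True)
        logits = torch.stack([m(x_adv) for m in models]).mean(dim=0)   # logit ensemble
        loss = torch.nn.functional.cross_entropy(logits, y)
        grad, = torch.autograd.grad(loss, x_adv)
        g = mu * g + grad / grad.abs().sum()                           # momentum accumulation
        x_adv = (x_adv + alpha * g.sign()).detach()
        x_adv = torch.max(torch.min(x_adv, x + epsilon), x - epsilon)  # L-inf projection
    return x_adv

# Toy white-box ensemble of three small countermeasure-like classifiers.
models = [torch.nn.Sequential(torch.nn.Linear(400, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2))
          for _ in range(3)]
x, y = torch.randn(4, 400), torch.randint(0, 2, (4,))
print(mi_fgsm_ensemble(models, x, y).shape)
```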
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liming Wang|AUTHOR Liming Wang]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1456–1460&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Discovering word-like units without textual transcriptions is an important step in low-resource speech technology. In this work, we demonstrate a model inspired by statistical machine translation and hidden Markov model/deep neural network (HMM-DNN) hybrid systems. Our learning algorithm is capable of discovering the visual and acoustic correlates of K distinct words in an unknown language by simultaneously learning the mapping from image regions to concepts (the first DNN), the mapping from acoustic feature vectors to phones (the second DNN), and the optimum alignment between the two (the HMM). In the simulated low-resource setting using MSCOCO and SpeechCOCO datasets, our model achieves 62.4% alignment accuracy and outperforms the audio-only segmental embedded GMM approach on standard word discovery evaluation metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maha Elbayad|AUTHOR Maha Elbayad]]^^1^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^1^^, [[Jakob Verbeek|AUTHOR Jakob Verbeek]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIG (UMR 5217), France; ^^2^^Facebook, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1461–1465&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Simultaneous machine translation consists in starting output generation before the entire input sequence is available. Wait-k decoders offer a simple but efficient approach for this problem. They first read k source tokens, after which they alternate between producing a target token and reading another source token. We investigate the behavior of wait-k decoding in low resource settings for spoken corpora using IWSLT datasets. We improve training of these models using unidirectional encoders, and training across multiple values of k. Experiments with Transformer and 2D-convolutional architectures show that our wait-k models generalize well across a wide range of latency levels. We also show that the 2D-convolution architecture is competitive with Transformers for simultaneous translation of spoken language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ha Nguyen|AUTHOR Ha Nguyen]]^^1^^, [[Fethi Bougares|AUTHOR Fethi Bougares]]^^2^^, [[N. Tomashenko|AUTHOR N. Tomashenko]]^^3^^, [[Yannick Estève|AUTHOR Yannick Estève]]^^3^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIG (UMR 5217), France; ^^2^^LIUM (EA 4023), France; ^^3^^LIA (EA 4128), France; ^^4^^LIG (UMR 5217), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1466–1470&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Self-supervised learning from raw speech has been proven beneficial to improve automatic speech recognition (ASR). We investigate here its impact on end-to-end automatic speech translation (AST) performance. We use a contrastive predictive coding (CPC) model pre-trained from unlabeled speech as a feature extractor for a downstream AST task. We show that self-supervised pre-training is particularly efficient in low resource settings and that fine-tuning CPC models on the AST training data further improves performance. Even in higher resource settings, ensembling AST models trained with filter-bank and CPC representations leads to near state-of-the-art models without using any ASR pre-training. This might be particularly beneficial when one needs to develop a system that translates from speech in a language with poorly standardized orthography or even from speech in an unwritten language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marco Gaido|AUTHOR Marco Gaido]], [[Mattia A. Di Gangi|AUTHOR Mattia A. Di Gangi]], [[Matteo Negri|AUTHOR Matteo Negri]], [[Mauro Cettolo|AUTHOR Mauro Cettolo]], [[Marco Turchi|AUTHOR Marco Turchi]]
</p><p class="cpabstractcardaffiliationlist">FBK, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1471–1475&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Direct speech-to-text translation (ST) models are usually trained on corpora segmented at sentence level, but at inference time they are commonly fed with audio split by a voice activity detector (VAD). Since VAD segmentation is not syntax-informed, the resulting segments do not necessarily correspond to well-formed sentences uttered by the speaker but, most likely, to fragments of one or more sentences. This segmentation mismatch degrades considerably the quality of ST models’ output. So far, researchers have focused on improving audio segmentation towards producing sentence-like splits. In this paper, instead, we address the issue in the model, making it more robust to a different, potentially sub-optimal segmentation. To this aim, we train our models on randomly segmented data and compare two approaches: fine-tuning and adding the previous segment as context. We show that our context-aware solution is more robust to VAD-segmented input, outperforming a strong base model and the fine-tuning on different VAD segmentations of an English-German test set by up to 4.25 BLEU points.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juan Pino|AUTHOR Juan Pino]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Xutai Ma|AUTHOR Xutai Ma]], [[Mohammad Javad Dousti|AUTHOR Mohammad Javad Dousti]], [[Yun Tang|AUTHOR Yun Tang]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1476–1480&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the main challenges for end-to-end speech translation is data scarcity. We leverage pseudo-labels generated from unlabeled audio by a cascade and an end-to-end speech translation model. This provides 8.3 and 5.7 BLEU gains over a strong semi-supervised baseline on the MuST-C English-French and English-German datasets, reaching state-of-the art performance. The effect of the quality of the pseudo-labels is investigated. Our approach is shown to be more effective than simply pre-training the encoder on the speech recognition task. Finally, we demonstrate the effectiveness of self-training by directly generating pseudo-labels with an end-to-end model instead of a cascade model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marcello Federico|AUTHOR Marcello Federico]]^^1^^, [[Yogesh Virkar|AUTHOR Yogesh Virkar]]^^1^^, [[Robert Enyedi|AUTHOR Robert Enyedi]]^^1^^, [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Amazon, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1481–1485&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic dubbing aims at replacing all speech contained in a video with speech in a different language, so that the result sounds and looks as natural as the original. Hence, in addition to conveying the same content of an original utterance (which is the typical objective of speech translation), dubbed speech should ideally also match its duration, the lip movements and gestures in the video, timbre, emotion and prosody of the speaker, and finally background noise and reverberation of the environment. In this paper, after describing our dubbing architecture, we focus on recent progress on the prosodic alignment component, which aims at synchronizing the translated transcript with the original utterances. We present empirical results for English-to-Italian dubbing on a publicly available collection of TED Talks. Our new prosodic alignment model, which allows for small relaxations in synchronicity, shows to significantly improve both prosodic alignment accuracy and overall subjective dubbing quality of previous work.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yasunori Ohishi|AUTHOR Yasunori Ohishi]]^^1^^, [[Akisato Kimura|AUTHOR Akisato Kimura]]^^1^^, [[Takahito Kawanishi|AUTHOR Takahito Kawanishi]]^^1^^, [[Kunio Kashino|AUTHOR Kunio Kashino]]^^1^^, [[David Harwath|AUTHOR David Harwath]]^^2^^, [[James Glass|AUTHOR James Glass]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1486–1490&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a data expansion method for learning a multilingual semantic embedding model using disjoint datasets containing images and their multilingual audio captions. Here, disjoint means that there are no shared images among the multiple language datasets, in contrast to existing works on multilingual semantic embedding based on visually-grounded speech audio, where it has been assumed that each image is associated with spoken captions of multiple languages. Although learning on disjoint datasets is more challenging, we consider it crucial in practical situations. Our main idea is to refer to another paired data when evaluating a loss value regarding an anchor image. We call this scheme “pair expansion”. The motivation behind this idea is to utilize even disjoint pairs by finding similarities, or commonalities, that may exist in different images. Specifically, we examine two approaches for calculating similarities: one using image embedding vectors and the other using object recognition results. Our experiments show that expanded pairs improve crossmodal and cross-lingual retrieval accuracy compared with non-expanded cases. They also show that similarities measured by the image embedding vectors yield better accuracy than those based on object recognition results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anne Wu|AUTHOR Anne Wu]], [[Changhan Wang|AUTHOR Changhan Wang]], [[Juan Pino|AUTHOR Juan Pino]], [[Jiatao Gu|AUTHOR Jiatao Gu]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1491–1495&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speech-to-text translation can provide a simpler and smaller system but is facing the challenge of data scarcity. Pre-training methods can leverage unlabeled data and have been shown to be effective in data-scarce settings. In this work, we explore whether self-supervised pre-trained speech representations can benefit the speech translation task in both high- and low-resource settings, whether they can transfer well to other languages, and whether they can be effectively combined with other common methods that help improve low-resource end-to-end speech translation such as using a pre-trained high-resource speech recognition system. We demonstrate that self-supervised pre-trained features can consistently improve the translation performance, and cross-lingual transfer allows the approach to extend to a variety of languages with little or no tuning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vincent Hughes|AUTHOR Vincent Hughes]]^^1^^, [[Frantz Clermont|AUTHOR Frantz Clermont]]^^2^^, [[Philip Harrison|AUTHOR Philip Harrison]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of York, UK; ^^2^^Australian National University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1858–1862&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A significant question for forensic voice comparison, and for speaker recognition more generally, is the extent to which different input features capture complementary speaker-specific information. Understanding complementarity allows us to make predictions about how combining methods using different features may produce better overall performance. In forensic contexts, it is also important to be able to explain to courts what information the underlying features are actually capturing. This paper addresses these issues by examining the extent to which MFCCs and LPCCs can predict F0, F1, F2, and F3 values using data extracted from the midpoint of the vocalic portion of the hesitation marker //um// for 89 speakers of standard southern British English. By-speaker correlations were calculated using multiple linear regression and performance was assessed using mean rho (ρ) values. Results show that the first two formants were more accurately predicted than F3 or F0. LPCCs consistently produced stronger correlations with the linguistic features than MFCCs, while increasing cepstral order up to 16 also increased the strength of the correlations. There was, however, considerable variability across speakers in terms of the accuracy of the predictions. We discuss the implications of these findings for forensic voice comparison.</p></div>
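As a rough illustration of the regression-and-correlation methodology described above, the sketch below fits a multiple linear regression from cepstral coefficients to a formant value and reports the rank correlation between predicted and observed values. The toy data, the 16-coefficient order, and the use of Spearman's rho are illustrative assumptions, not the paper's exact setup.

```python
# Hedged sketch: multiple linear regression from cepstra to a formant,
# assessed with a correlation coefficient (toy data, not the paper's um tokens).
import numpy as np
from sklearn.linear_model import LinearRegression
from scipy.stats import spearmanr

rng = np.random.default_rng(1)
cepstra = rng.normal(size=(80, 16))                 # 80 tokens, 16 coefficients
f1 = cepstra @ rng.normal(size=16) + rng.normal(scale=0.5, size=80)

pred = LinearRegression().fit(cepstra, f1).predict(cepstra)
rho, _ = spearmanr(pred, f1)                        # by-speaker rho in the paper
print(round(rho, 3))
```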
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jana Neitsch|AUTHOR Jana Neitsch]]^^1^^, [[Plinio A. Barbosa|AUTHOR Plinio A. Barbosa]]^^2^^, [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Denmark, Denmark; ^^2^^Unicamp, Brazil</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1863–1867&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Several studies have shown that rhetorical //wh//-questions (RQs) and string-identical information-seeking //wh//-questions (ISQs) are realized with different prosodic characteristics. In contrast to ISQs, RQs have been shown to be phonetically realized with a breathier (i.e., softer) voice quality (e.g., German and English) and longer constituent durations (e.g., German, English, Icelandic). Based on similar results found for different languages, we investigate //wh//-RQs and string-identical //wh//-ISQs in Brazilian Portuguese (BP) and German (G). We analyze (i) whether specific duration and voice-quality patterns characterize and separate the two illocution types (RQ and ISQ) in BP, and (ii) if direct measures of the respiratory sub-system reveal differences between illocution types, given that breathiness involves greater transglottal air flow which can be observed in the speakers’ chest and/or abdomen movement.

Our data suggest that, similar to G, English, and Icelandic, duration and voice quality patterns play a role in the realization of RQs compared to ISQs in BP, reinforcing the assumption that there are cross-linguistically similar phonetic features in the realization of RQs compared to ISQs. We also find that speakers of G breathe in more deeply and dynamically than speakers of BP, suggesting a link between breathing and voice quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rebecca Defina|AUTHOR Rebecca Defina]], [[Catalina Torres|AUTHOR Catalina Torres]], [[Hywel Stoakes|AUTHOR Hywel Stoakes]]
</p><p class="cpabstractcardaffiliationlist">University of Melbourne, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1868–1872&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Clause chains are a syntactic strategy for combining multiple clauses into a single unit. They are reported in many languages, including Korean and Turkish. However, they have seen relatively little focused research. In particular, prosodic features are often mentioned in descriptions of clause chaining; however, there have been vanishingly few investigations. Corpus-based studies of the prosody of clause chains in two unrelated languages of Papua New Guinea report that they are typically produced as a sequence of Intonation phrases united by pitch-scaling of the L% boundary tones in each clause, with only the final, finite clause descending to a full L%. The present study is the first experimental investigation of the prosody of clause chains in Pitjantjatjara.

This paper focuses on one type of clause chain found in the Australian Indigenous language Pitjantjatjara. We examine a set of 120 clause chains read out by three native Pitjantjatjara speakers. Prosodic analysis reveals that these Pitjantjatjara clause chains are produced within a single Intonational Phrase. Speakers do not pause between the clauses in the chain; there is consistent linear downstep throughout the phrase, and phrase-final lowering additionally occurs at the end of the utterance. This differs from previous impressionistic studies of the prosody of clause chains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ai Mizoguchi|AUTHOR Ai Mizoguchi]]^^1^^, [[Ayako Hashimoto|AUTHOR Ayako Hashimoto]]^^2^^, [[Sanae Matsui|AUTHOR Sanae Matsui]]^^3^^, [[Setsuko Imatomi|AUTHOR Setsuko Imatomi]]^^4^^, [[Ryunosuke Kobayashi|AUTHOR Ryunosuke Kobayashi]]^^3^^, [[Mafuyu Kitahara|AUTHOR Mafuyu Kitahara]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Maebashi Institute of Technology, Japan; ^^2^^Tokyo Kasei-Gakuin College, Japan; ^^3^^Sophia University, Japan; ^^4^^Mejiro University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1873–1877&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Research on Tohoku dialects, a variety of Japanese, has found that the voiceless stops /k/ and /t/ in the intervocalic position are frequently realized as voiced stops. However, the phenomenon has mainly been judged aurally in the Japanese linguistics literature and has not been confirmed by acoustic measurements. We measured the VOT of data originally collected in the survey of Tohoku dialects by [1]. The data used in this study includes two age groups from eight sites. The results demonstrate that for word-medial stops, the VOT distribution of voiced and voiceless stops largely overlapped, while the laryngeal contrast was maintained for word-initial stops. Intervocalic voicing neutralization was confirmed by quantitative acoustic measurements. The effects of neighboring vowels were also investigated to show that height, but not duration, had a significant effect on voicing neutralization. Our results shed light on the phonetic nature of Tohoku dialects as well as on their phonological structure, such as the role of voicing contrast.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lou Lee|AUTHOR Lou Lee]]^^1^^, [[Denis Jouvet|AUTHOR Denis Jouvet]]^^2^^, [[Katarina Bartkova|AUTHOR Katarina Bartkova]]^^1^^, [[Yvon Keromnes|AUTHOR Yvon Keromnes]]^^1^^, [[Mathilde Dargnat|AUTHOR Mathilde Dargnat]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ATILF (UMR 7118), France; ^^2^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1878–1882&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the prosodic characteristics of French and English discourse markers according to their pragmatic meaning in context. The study focusses on three French discourse markers (//alors// [‘so’], //bon// [‘well’], and //donc// [‘so’]) and three English markers (//now, so,// and //well//). Hundreds of occurrences of discourse markers were automatically extracted from French and English speech corpora and manually annotated with pragmatic function labels. The paper compares the prosodic characteristics of discourse markers in different speech styles and in two languages. The first comparison is carried out with respect to two different speech styles in French: spontaneous speech vs. prepared speech. The other comparison of the prosodic characteristics is conducted between two languages, French vs. English, on the prepared speech. Results show that some pragmatic functions of discourse markers bring about specific prosodic behaviour in terms of presence and position of pauses, and their F0 articulation in their immediate context. Moreover, similar pragmatic functions frequently share similar prosodic characteristics, even across languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dina El Zarka|AUTHOR Dina El Zarka]]^^1^^, [[Anneliese Kelterer|AUTHOR Anneliese Kelterer]]^^1^^, [[Barbara Schuppler|AUTHOR Barbara Schuppler]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Graz, Austria; ^^2^^Technische Universität Graz, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1883–1887&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study presents the first acoustic examination of prominence relations in entire contours associated with different information structures in Egyptian Arabic. Previous work has shown that topics and foci are typically associated with different pitch events, whereas it is still a matter of debate whether and how Egyptian Arabic uses prominence relations to mark narrow focus. The analysis of data from 17 native speakers showed that narrow focus was marked by on-focus pitch expansion as well as post-focus compression. Post-focus compression was realized as a large downstep after focus, compressed pitch range, lower intensity and shorter duration. The results also showed further register lowering after a contrastive focus, but no further pitch boost of the focused word. By contrast, a contrastive topic showed higher scaling of the topic as well as an expanded pitch range of the overall contour. The findings of this study stress the significance of whole contours to convey intonational meanings, revealing gradient prominence cues to focus across the utterance, specifically post-focus register lowering to enhance the prominence of a contrastive focus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Benazir Mumtaz|AUTHOR Benazir Mumtaz]], [[Tina Bögel|AUTHOR Tina Bögel]], [[Miriam Butt|AUTHOR Miriam Butt]]
</p><p class="cpabstractcardaffiliationlist">Universität Konstanz, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1888–1892&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study looks at the role of lexical stress in Urdu prosody. The literature on lexical stress is divided, with some authors developing algorithms for stress assignment, while others deny its relevance for prosody. We performed three experiments to investigate this issue. We found evidence that a strong increase in the duration of a syllable indicates stress and that lexical stress and phrasal intonation interact in a non-trivial manner. We also found that stress perception varies according to syllable weight with weight clash being a determining factor.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rachid Riad|AUTHOR Rachid Riad]]^^1^^, [[Hadrien Titeux|AUTHOR Hadrien Titeux]]^^1^^, [[Laurie Lemoine|AUTHOR Laurie Lemoine]]^^2^^, [[Justine Montillot|AUTHOR Justine Montillot]]^^2^^, [[Jennifer Hamet Bagnou|AUTHOR Jennifer Hamet Bagnou]]^^2^^, [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]]^^1^^, [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]^^1^^, [[Anne-Catherine Bachoud-Lévi|AUTHOR Anne-Catherine Bachoud-Lévi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LSCP (UMR 8554), France; ^^2^^NPI (U955 E01), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1893–1897&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Disease-modifying treatments are currently assessed in neurodegenerative diseases. Huntington’s Disease represents a unique opportunity to design automatic sub-clinical markers, even in premanifest gene carriers. We investigated phonatory impairments as potential clinical markers and propose them for both diagnosis and the follow-up of gene carriers. We used two sets of features: Phonatory features and Modulation Power Spectrum features. We found that phonation is not sufficient for the identification of sub-clinical disorders of premanifest gene carriers. According to our regression results, Phonatory features are suitable for the prediction of clinical performance in Huntington’s Disease.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laure Dentel|AUTHOR Laure Dentel]]^^1^^, [[Julien Meyer|AUTHOR Julien Meyer]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^World Whistles Research Association, France; ^^2^^GIPSA-lab (UMR 5216), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1898–1902&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human languages have the flexibility to be acoustically adapted to the context of communication, such as in shouting or whispering. Drummed forms of languages represent one of the most extreme natural expressions of such speech adaptability. A large amount of research has been conducted on drummed languages in anthropology or linguistics, particularly in West African societies. However, in spite of the clearly rhythmic nature of drumming, previous studies have largely neglected to explore systematically the role of speech rhythm. Here, we explore a unique corpus of the Bendré drummed speech form of the Mossi people, transcribed and published in the 1980s by the anthropologist Kawada Junzo. The analysis of this large database in the Mooré language reveals that the rhythmic units encoded in the length of pauses between drumbeats match more closely with vowel-to-vowel intervals than with syllable parsing. Meanwhile, we confirm for the first time a result found recently on the drummed speech tradition of the Bora Amazonian language. However, the complex acoustic structure of the Bendré skin drum required much more attention than the simple two-pitch hollow log drum of the Bora. Thus, we also present here results on how drummed Bendré timbre encodes the tones of the Mooré language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1496–1500&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in deep learning have facilitated the design of speaker verification systems that directly input raw waveforms. For example, RawNet [1] extracts speaker embeddings from raw waveforms, which simplifies the process pipeline and demonstrates competitive performance. In this study, we improve RawNet by scaling feature maps using various methods. The proposed mechanism utilizes a scale vector that adopts a sigmoid non-linear function. It refers to a vector with dimensionality equal to the number of filters in a given feature map. Using a scale vector, we propose to scale the feature map multiplicatively, additively, or both. In addition, we investigate replacing the first convolution layer with the sinc-convolution layer of SincNet. Experiments performed on the VoxCeleb1 evaluation dataset demonstrate the effectiveness of the proposed methods, and the best performing system reduces the equal error rate by half compared to the original RawNet. Expanded evaluation results obtained using the VoxCeleb1-E and VoxCeleb-H protocols marginally outperform existing state-of-the-art systems.</p></div>
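A minimal PyTorch sketch of the feature-map scaling idea described above: a sigmoid scale vector, with one value per filter, is derived from each feature map and applied multiplicatively, additively, or both. The average pooling used to form the vector and the layer sizes are assumptions for illustration, not the authors' exact configuration.

```python
# Hedged sketch of sigmoid-based feature-map scaling (not the original RawNet code).
import torch
import torch.nn as nn

class FeatureMapScaling(nn.Module):
    def __init__(self, num_filters: int, mode: str = "mul"):
        super().__init__()
        self.fc = nn.Linear(num_filters, num_filters)
        self.mode = mode                         # "mul", "add", or "both"

    def forward(self, x):                        # x: (batch, filters, time)
        s = x.mean(dim=-1)                       # pool over time -> (batch, filters)
        s = torch.sigmoid(self.fc(s)).unsqueeze(-1)   # scale vector in (0, 1)
        if self.mode == "mul":
            return x * s
        if self.mode == "add":
            return x + s
        return x * s + s                         # "both": multiplicative then additive

feat = torch.randn(4, 128, 200)                  # toy feature maps
print(FeatureMapScaling(128, "both")(feat).shape)
```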
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xu Li|AUTHOR Xu Li]]^^1^^, [[Na Li|AUTHOR Na Li]]^^2^^, [[Jinghua Zhong|AUTHOR Jinghua Zhong]]^^3^^, [[Xixin Wu|AUTHOR Xixin Wu]]^^4^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^5^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^Tencent, China; ^^3^^SpeechX, China; ^^4^^University of Cambridge, UK; ^^5^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1540–1544&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, adversarial attacks on automatic speaker verification (ASV) systems have attracted widespread attention as they pose severe threats to ASV systems. However, methods to defend against such attacks are limited. Existing approaches mainly focus on retraining ASV systems with adversarial data augmentation. Also, countermeasure robustness against different attack settings is insufficiently investigated. Orthogonal to prior approaches, this work proposes to defend ASV systems against adversarial attacks with a separate detection network, rather than augmenting adversarial data into ASV training. A VGG-like binary classification detector is introduced and demonstrated to be effective on detecting adversarial samples. To investigate detector robustness in a realistic defense scenario where unseen attack settings may exist, we analyze various kinds of unseen attack settings’ impact and observe that the detector is robust (6.27% EER,,det,, degradation in the worst case) against unseen substitute ASV systems, but it has weak robustness (50.37% EER,,det,, degradation in the worst case) against unseen perturbation methods. The weak robustness against unseen perturbation methods shows a direction for developing stronger countermeasures.</p></div>
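The detector itself is conceptually simple; the sketch below is a hedged stand-in for a VGG-like binary classifier (stacked 3×3 convolutions followed by a genuine-vs-adversarial output), with channel counts and input shape chosen arbitrarily for illustration rather than taken from the paper.

```python
# Hedged sketch of a VGG-style genuine-vs-adversarial detector over spectrograms.
import torch
import torch.nn as nn

detector = nn.Sequential(
    nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(),
    nn.Linear(64, 2),                    # class 0: genuine, class 1: adversarial
)

spec = torch.randn(8, 1, 64, 200)        # toy log-spectrogram batch
print(detector(spec).shape)              # (8, 2) logits
```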
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Seong Min Kye|AUTHOR Seong Min Kye]], [[Yeunju Choi|AUTHOR Yeunju Choi]], [[Myunghun Jung|AUTHOR Myunghun Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1501–1505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Currently, the most widely used approach for speaker verification is deep speaker embedding learning. In this approach, we obtain a speaker embedding vector by pooling single-scale features that are extracted from the last layer of a speaker feature extractor. Multi-scale aggregation (MSA), which utilizes multi-scale features from different layers of the feature extractor, has recently been introduced and shows superior performance for variable-duration utterances. To increase robustness when dealing with utterances of arbitrary duration, this paper improves MSA by using a feature pyramid module. The module enhances speaker-discriminative information of features from multiple layers via a top-down pathway and lateral connections. We extract speaker embeddings using the enhanced features that contain rich speaker information with different time scales. Experiments on the VoxCeleb dataset show that the proposed module improves previous MSA methods with a smaller number of parameters. It also achieves better performance than state-of-the-art approaches for both short and long utterances.</p></div>
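The following sketch shows one plausible reading of the feature pyramid module: lateral 1×1 convolutions project multi-layer features to a common width, a top-down pathway upsamples deeper features and adds them to shallower ones, and smoothing convolutions produce the enhanced maps. The channel widths and the 1-D layout are assumptions, not the paper's exact design.

```python
# Hedged sketch of a top-down feature pyramid over multi-layer speaker features.
import torch
import torch.nn as nn
import torch.nn.functional as F

class FeaturePyramid(nn.Module):
    def __init__(self, in_channels, out_channels=256):
        super().__init__()
        self.lateral = nn.ModuleList(nn.Conv1d(c, out_channels, 1) for c in in_channels)
        self.smooth = nn.ModuleList(nn.Conv1d(out_channels, out_channels, 3, padding=1)
                                    for _ in in_channels)

    def forward(self, feats):                    # feats: shallow -> deep
        laterals = [l(f) for l, f in zip(self.lateral, feats)]
        for i in range(len(laterals) - 2, -1, -1):       # top-down pathway
            up = F.interpolate(laterals[i + 1], size=laterals[i].shape[-1], mode="nearest")
            laterals[i] = laterals[i] + up               # lateral connection
        return [s(l) for s, l in zip(self.smooth, laterals)]

feats = [torch.randn(2, 64, 200), torch.randn(2, 128, 100), torch.randn(2, 256, 50)]
print([o.shape for o in FeaturePyramid([64, 128, 256])(feats)])
```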
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bin Gu|AUTHOR Bin Gu]], [[Wu Guo|AUTHOR Wu Guo]], [[Fenglin Ding|AUTHOR Fenglin Ding]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1506–1510&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, adaptive mechanisms are applied in deep neural network (DNN) training for x-vector-based text-independent speaker verification. First, adaptive convolutional neural networks (ACNNs) are employed in frame-level embedding layers, where the parameters of the convolution filters are adjusted based on the input features. Compared with conventional CNNs, ACNNs have more flexibility in capturing speaker information. Moreover, we replace conventional batch normalization (BN) with adaptive batch normalization (ABN). By dynamically generating the scaling and shifting parameters in BN, ABN adapts models to the acoustic variability arising from various factors such as channel and environmental noises. Finally, we incorporate these two methods to further improve performance. Experiments are carried out on the Speakers in the Wild (SITW) and VOiCES databases. The results demonstrate that the proposed methods significantly outperform the original x-vector approach.</p></div>
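As a hedged illustration of adaptive batch normalization, the sketch below generates the scaling and shifting parameters from a simple utterance-level summary of the input instead of learning them as fixed affine parameters; the summary statistic and the generator network are illustrative choices, not the authors' implementation.

```python
# Hedged sketch: BN whose scale/shift are generated from utterance-level statistics.
import torch
import torch.nn as nn

class AdaptiveBatchNorm1d(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.bn = nn.BatchNorm1d(channels, affine=False)
        self.gen = nn.Linear(channels, 2 * channels)   # produces (gamma, beta)

    def forward(self, x):                 # x: (batch, channels, time)
        stats = x.mean(dim=-1)            # simple utterance summary (assumption)
        gamma, beta = self.gen(stats).chunk(2, dim=-1)
        gamma = (1.0 + gamma).unsqueeze(-1)
        beta = beta.unsqueeze(-1)
        return gamma * self.bn(x) + beta

x = torch.randn(8, 64, 300)
print(AdaptiveBatchNorm1d(64)(x).shape)
```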
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Santi Prieto|AUTHOR Santi Prieto]]^^1^^, [[Alfonso Ortega|AUTHOR Alfonso Ortega]]^^2^^, [[Iván López-Espejo|AUTHOR Iván López-Espejo]]^^3^^, [[Eduardo Lleida|AUTHOR Eduardo Lleida]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^das-Nano, Spain; ^^2^^Universidad de Zaragoza, Spain; ^^3^^Aalborg University, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1511–1515&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of speaker verification systems degrades when vocal effort conditions between enrollment and test (e.g., shouted vs. normal speech) are different. This is a potential situation in non-cooperative speaker verification tasks. In this paper, we present a study on different methods for linear compensation of embeddings making use of Gaussian mixture models to cluster shouted and normal speech domains. These compensation techniques are borrowed from the area of robustness for automatic speech recognition and, in this work, we apply them to compensate the mismatch between shouted and normal conditions in speaker verification. Before compensation, shouted condition is automatically detected by means of logistic regression. The process is computationally light and it is performed in the back-end of an x-vector system. Experimental results show that applying the proposed approach in the presence of vocal effort mismatch yields up to 13.8% equal error rate relative improvement with respect to a system that applies neither shouted speech detection nor compensation.</p></div>
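A toy sketch of the back-end flow under stated assumptions: logistic regression detects shouted embeddings, Gaussian mixture models cluster the two vocal-effort domains, and detected embeddings are shifted by the difference between matched cluster means. The nearest-mean cluster pairing and the simple mean-shift compensation are simplifications standing in for the compensation techniques studied in the paper.

```python
# Hedged sketch: shouted-speech detection plus cluster-wise mean-shift compensation.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
normal = rng.normal(0.0, 1.0, (500, 32))         # toy "normal speech" embeddings
shouted = rng.normal(1.5, 1.2, (500, 32))        # toy "shouted speech" embeddings

det = LogisticRegression(max_iter=1000).fit(
    np.vstack([normal, shouted]), np.r_[np.zeros(500), np.ones(500)])

gm_n = GaussianMixture(n_components=4, random_state=0).fit(normal)
gm_s = GaussianMixture(n_components=4, random_state=0).fit(shouted)
# pair each shouted cluster with its nearest normal-speech cluster (simplification)
nearest = np.linalg.norm(
    gm_s.means_[:, None, :] - gm_n.means_[None, :, :], axis=-1).argmin(axis=1)

def compensate(x):
    """Shift embeddings flagged as shouted toward the matched normal cluster mean."""
    x = np.atleast_2d(x).copy()
    mask = det.predict(x).astype(bool)
    if mask.any():
        k = gm_s.predict(x[mask])
        x[mask] += gm_n.means_[nearest[k]] - gm_s.means_[k]
    return x

print(compensate(shouted[:3]).shape)
```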
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aaron Nicolson|AUTHOR Aaron Nicolson]], [[Kuldip K. Paliwal|AUTHOR Kuldip K. Paliwal]]
</p><p class="cpabstractcardaffiliationlist">Griffith University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1516–1520&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce sum-product networks (SPNs) for robust speech processing through a simple robust automatic speaker identification (ASI) task.^^*^^ SPNs are deep probabilistic graphical models capable of answering multiple probabilistic queries. We show that SPNs are able to remain robust by using the marginal probability density function (PDF) of the spectral features that reliably represent speech. Though current SPN toolkits and learning algorithms are in their infancy, we aim to show that SPNs have the potential to become a useful tool for robust speech processing in the future. SPN speaker models are evaluated here on real-world non-stationary and coloured noise sources at multiple signal-to-noise ratio (SNR) levels. In terms of ASI accuracy, we find that SPN speaker models are more robust than two recent convolutional neural network (CNN)-based ASI systems. Additionally, SPN speaker models consist of significantly fewer parameters than their CNN-based counterparts. The results indicate that SPN speaker models could be a robust, parameter-efficient alternative for ASI. Additionally, this work demonstrates that SPNs have potential in related tasks, such as robust automatic speech recognition (ASR) and automatic speaker verification (ASV).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1521–1525&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most studies on speaker verification systems focus on long-duration utterances, which are composed of sufficient phonetic information. However, the performance of these systems is known to degrade for short-duration utterances due to the lack of phonetic information compared to long utterances. In this paper, we propose a method that compensates for the performance degradation of speaker verification for short utterances, referred to as “//segment aggregation//”. The proposed method adopts an ensemble-based design to improve the stability and accuracy of speaker verification systems. The proposed method segments an input utterance into several short utterances and then aggregates the segment embeddings extracted from the segmented inputs to compose a speaker embedding. Then, this method simultaneously trains the segment embeddings and the aggregated speaker embedding. In addition, we also modified the teacher-student learning method for the proposed method. Experimental results for different input durations on the VoxCeleb1 test set demonstrate that the proposed technique improves speaker verification performance by about 45.37% relative to the baseline system under the 1-second test utterance condition.</p></div>
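A minimal sketch of the segment aggregation idea: the input utterance is split into fixed-length segments, each segment is embedded, and the segment embeddings are averaged into a speaker embedding. The toy encoder, segment length, and mean aggregation are assumptions for illustration; the joint training of segment and aggregated embeddings and the modified teacher-student learning are omitted here.

```python
# Hedged sketch: segment an utterance, embed each segment, aggregate by averaging.
import torch
import torch.nn as nn

class SegmentAggregation(nn.Module):
    def __init__(self, encoder: nn.Module, seg_frames: int = 100):
        super().__init__()
        self.encoder, self.seg_frames = encoder, seg_frames

    def forward(self, feats):                        # feats: (batch, time, dim)
        segs = feats.unfold(1, self.seg_frames, self.seg_frames)  # (B, nseg, dim, seg)
        b, n, d, s = segs.shape
        seg_emb = self.encoder(segs.permute(0, 1, 3, 2).reshape(b * n, s, d))
        seg_emb = seg_emb.view(b, n, -1)
        return seg_emb.mean(dim=1), seg_emb          # speaker emb., segment embs.

toy_encoder = nn.Sequential(nn.Flatten(1), nn.LazyLinear(192))   # stand-in encoder
spk, seg = SegmentAggregation(toy_encoder, seg_frames=100)(torch.randn(2, 400, 40))
print(spk.shape, seg.shape)
```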
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shai Rozenberg|AUTHOR Shai Rozenberg]], [[Hagai Aronowitz|AUTHOR Hagai Aronowitz]], [[Ron Hoory|AUTHOR Ron Hoory]]
</p><p class="cpabstractcardaffiliationlist">IBM, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1526–1529&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the rise of voice-activated applications, the need for speaker recognition is rapidly increasing. The x-vector, an embedding approach based on a deep neural network (DNN), is considered the state-of-the-art when proper end-to-end training is not feasible. However, the accuracy significantly decreases when recording conditions (noise, sample rate, etc.) are mismatched, either between the x-vector training data and the target data or between enrollment and test data. We introduce the Siamese x-vector Reconstruction (SVR) for domain adaptation. We reconstruct the embedding of a higher quality signal from a lower quality counterpart using a lean auxiliary Siamese DNN. We evaluate our method on several mismatch scenarios and demonstrate significant improvement over the baseline.</p></div>
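A hedged sketch of the reconstruction step: a small auxiliary network maps an embedding extracted from degraded audio toward the embedding of its higher-quality counterpart. The abstract does not specify the training objective or network size, so the cosine-distance loss and the two-layer MLP below are assumptions.

```python
# Hedged sketch: reconstruct a "clean-condition" embedding from a degraded one.
import torch
import torch.nn as nn

recon = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512))
opt = torch.optim.Adam(recon.parameters(), lr=1e-3)

# toy paired embeddings: x_low from degraded audio, x_high from its clean counterpart
x_high = torch.randn(256, 512)
x_low = x_high + 0.3 * torch.randn(256, 512)

for step in range(200):
    pred = recon(x_low)
    loss = 1.0 - nn.functional.cosine_similarity(pred, x_high).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
print(float(loss))
```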
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanpei Shi|AUTHOR Yanpei Shi]], [[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1530–1534&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While the use of deep neural networks has significantly boosted speaker recognition performance, it is still challenging to separate speakers in poor acoustic environments. Here, speech enhancement methods have traditionally improved performance, and recent works have shown that adapting speech enhancement can lead to further gains. This paper introduces a novel approach that cascades speech enhancement and speaker recognition. In the first step, a speaker embedding vector is generated, which is used in the second step to enhance the speech quality and re-identify the speakers. Models are trained in an integrated framework with joint optimisation. The proposed approach is evaluated using the VoxCeleb1 dataset, which aims to assess speaker recognition in real world situations. In addition, three types of noise at different signal-to-noise ratios were added for this work. The obtained results show that the proposed approach using speaker-dependent speech enhancement can yield better speaker recognition and speech enhancement performances than two baselines in various noise conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]]^^1^^, [[Marina Volkova|AUTHOR Marina Volkova]]^^1^^, [[Anastasia Avdeeva|AUTHOR Anastasia Avdeeva]]^^2^^, [[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^1^^, [[Artem Gorlanov|AUTHOR Artem Gorlanov]]^^2^^, [[Tseren Andzhukaev|AUTHOR Tseren Andzhukaev]]^^2^^, [[Artem Ivanov|AUTHOR Artem Ivanov]]^^2^^, [[Alexander Kozlov|AUTHOR Alexander Kozlov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ITMO University, Russia; ^^2^^STC-innovations, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1535–1539&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The problem of system performance degradation in mismatched acoustic conditions has been widely acknowledged in the community and is common for different fields. The present state-of-the-art deep speaker embedding models are domain-sensitive. The main idea of the current research is to develop a single method for automatic signal quality estimation, which allows short-term signal characteristics to be evaluated.

This paper presents a neural network based approach for blind speech signal quality estimation in terms of signal-to-noise ratio (SNR) and reverberation time (RT60), which is also able to classify the type of underlying additive noise. Additionally, the current research revealed the need for an accurate voice activity detector that performs well in both clean and noisy unseen environments; therefore, a novel neural network VAD based on the U-net architecture is presented. The proposed algorithms allow the NIST, SITW, and VOiCES datasets, which are commonly used for objective comparison of speaker verification systems, to be analysed from a new point of view, and effective calibration steps to be considered to improve speaker recognition quality on them.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vaishali Pal|AUTHOR Vaishali Pal]]^^1^^, [[Fabien Guillot|AUTHOR Fabien Guillot]]^^1^^, [[Manish Shrivastava|AUTHOR Manish Shrivastava]]^^2^^, [[Jean-Michel Renders|AUTHOR Jean-Michel Renders]]^^1^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, France; ^^2^^IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1545–1549&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken dialogue systems typically use one or several (top-N) ASR sequence(s) for inferring the semantic meaning and tracking the state of the dialogue. However, ASR graphs, such as confusion networks (confnets), provide a compact representation of a richer hypothesis space than a top-N ASR list. In this paper, we study the benefits of using confusion networks with a neural dialogue state tracker (DST). We encode the 2-dimensional confnet into a 1-dimensional sequence of embeddings using a confusion network encoder which can be used with any DST system. Our confnet encoder is plugged into the ‘Global-locally Self-Attentive Dialogue State Tracker’ (GLAD) model for DST and obtains significant improvements in both accuracy and inference time compared to using top-N ASR hypotheses.</p></div>
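One simple way to realise a confusion network encoder of this kind (a simplification; the paper's encoder may use attention rather than a plain weighted sum) is to collapse each confusion bin into a single vector by posterior-weighted summation of its arc word embeddings, yielding a 1-dimensional sequence that any DST encoder can consume.

```python
# Hedged sketch: collapse each confusion bin into one posterior-weighted embedding.
import torch
import torch.nn as nn

class ConfnetEncoder(nn.Module):
    def __init__(self, vocab_size: int, dim: int = 128):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, dim, padding_idx=0)

    def forward(self, arc_ids, arc_probs):    # both: (batch, bins, arcs)
        e = self.emb(arc_ids)                  # (batch, bins, arcs, dim)
        return (arc_probs.unsqueeze(-1) * e).sum(dim=2)   # (batch, bins, dim)

ids = torch.randint(1, 1000, (2, 6, 4))        # toy confnet: 6 bins, up to 4 arcs each
probs = torch.softmax(torch.randn(2, 6, 4), dim=-1)
print(ConfnetEncoder(1000)(ids, probs).shape)
```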
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Antoine Caubrière|AUTHOR Antoine Caubrière]]^^1^^, [[Yannick Estève|AUTHOR Yannick Estève]]^^2^^, [[Antoine Laurent|AUTHOR Antoine Laurent]]^^1^^, [[Emmanuel Morin|AUTHOR Emmanuel Morin]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIUM (EA 4023), France; ^^2^^LIA (EA 4128), France; ^^3^^LS2N (UMR 6004), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1590–1594&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies have led to the introduction of Speech-to-Concept End-to-End (E2E) neural architectures for Spoken Language Understanding (SLU) that reach state-of-the-art performance. In this work, we propose a way to compute confidence measures on semantic concepts recognized by a Speech-to-Text E2E SLU system. We investigate the use of the hidden representations of our CTC-based SLU system to train an external simple classifier. We experiment with two kinds of simple external classifiers to analyze the subsequences of hidden representations involved in recognized semantic concepts. The first external classifier is based on an MLP while the second one is based on a bLSTM neural network. We compare them to a baseline confidence measure computed directly from the softmax outputs of the E2E system. On the challenging French MEDIA corpus, when the confidence measure is used for rejection, experiments show that using an external bLSTM significantly outperforms the other approaches in terms of precision/recall. To evaluate the additional information provided by this confidence measure, we compute the value of Normalised Cross-Entropy (NCE). Reaching a value equal to 0.288, we show that our best proposed confidence measure brings relevant information about the reliability of a recognized concept.</p></div>
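For reference, the Normalised Cross-Entropy mentioned above can be computed as in the sketch below; this is one common definition (the maximum entropy of the correct/incorrect prior minus the cross-entropy of the confidences, normalised by the former) and is an assumption here rather than necessarily the exact variant used in the paper.

```python
# Hedged sketch of one common NCE definition for confidence measures.
import math

def normalised_cross_entropy(confidences, correct, eps=1e-12):
    """confidences: predicted confidence per decision; correct: 1 if correct else 0."""
    n = len(confidences)
    n_c = sum(correct)
    p_c = n_c / n
    h_max = -(n_c * math.log2(p_c + eps) + (n - n_c) * math.log2(1 - p_c + eps))
    h_conf = -sum(math.log2(c + eps) if ok else math.log2(1 - c + eps)
                  for c, ok in zip(confidences, correct))
    return (h_max - h_conf) / h_max     # 1 for perfect confidences, 0 for the prior

print(round(normalised_cross_entropy([0.9, 0.8, 0.2, 0.7], [1, 1, 0, 1]), 3))
```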
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haoyu Wang|AUTHOR Haoyu Wang]], [[Shuyan Dong|AUTHOR Shuyan Dong]], [[Yue Liu|AUTHOR Yue Liu]], [[James Logan|AUTHOR James Logan]], [[Ashish Kumar Agrawal|AUTHOR Ashish Kumar Agrawal]], [[Yang Liu|AUTHOR Yang Liu]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1550–1554&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Domain-agnostic Automatic Speech Recognition (ASR) systems suffer from the issue of mistranscribing domain-specific words, which leads to failures in downstream tasks. In this paper, we present a post-editing ASR error correction method using the Transformer model for entity mention correction and retrieval. Specifically, we propose a novel augmented variant of the Transformer model that encodes both the word and phoneme sequence of an entity, and attends to phoneme information in addition to word-level information during decoding to correct mistranscribed named entities. We evaluate our method on both the ASR error correction task and the downstream retrieval task. Our method achieves 48.08% entity error rate (EER) reduction in ASR error correction task and 26.74% mean reciprocal rank (MRR) improvement for the retrieval task. In addition, our augmented Transformer model significantly outperforms the vanilla Transformer model with 17.89% EER reduction and 1.98% MRR increase, demonstrating the effectiveness of incorporating phoneme information in the correction model.</p></div>
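A hedged sketch of the dual-source idea: a decoder layer that attends to both a word-encoder memory and a phoneme-encoder memory before its feed-forward block. The dimensions, head counts, and sequential ordering of the two cross-attentions are illustrative assumptions, not the authors' exact architecture.

```python
# Hedged sketch: decoder layer with word-level and phoneme-level cross-attention.
import torch
import torch.nn as nn

class DualSourceDecoderLayer(nn.Module):
    def __init__(self, d=256, heads=4):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d, heads, batch_first=True)
        self.word_attn = nn.MultiheadAttention(d, heads, batch_first=True)
        self.phone_attn = nn.MultiheadAttention(d, heads, batch_first=True)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
        self.norms = nn.ModuleList(nn.LayerNorm(d) for _ in range(4))

    def forward(self, y, word_mem, phone_mem):
        y = self.norms[0](y + self.self_attn(y, y, y)[0])
        y = self.norms[1](y + self.word_attn(y, word_mem, word_mem)[0])
        y = self.norms[2](y + self.phone_attn(y, phone_mem, phone_mem)[0])
        return self.norms[3](y + self.ff(y))

layer = DualSourceDecoderLayer()
print(layer(torch.randn(2, 10, 256), torch.randn(2, 12, 256),
            torch.randn(2, 30, 256)).shape)
```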
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xueli Jia|AUTHOR Xueli Jia]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Zhiyong Zhang|AUTHOR Zhiyong Zhang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]
</p><p class="cpabstractcardaffiliationlist">Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1555–1559&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end Spoken Language Understanding (SLU) models are made increasingly large and complex to achieve state-of-the-art accuracy. However, the increased complexity of a model can also introduce a high risk of over-fitting, which is a major challenge in SLU tasks due to the limitation of available data. In this paper, we propose an attention-based SLU model together with three encoder enhancement strategies to overcome the data sparsity challenge. The first strategy focuses on a transfer-learning approach to improve the feature extraction capability of the encoder. It is implemented by pre-training the encoder component with a quantity of Automatic Speech Recognition annotated data relying on the standard Transformer architecture and then fine-tuning the SLU model with a small amount of target labelled data. The second strategy adopts a multi-task learning strategy: the SLU model integrates the speech recognition model by sharing the same underlying encoder, thereby improving robustness and generalization ability. The third strategy, drawing on the Component Fusion (CF) idea, involves a Bidirectional Encoder Representations from Transformers (BERT) model and aims to boost the capability of the decoder with an auxiliary network. It hence reduces the risk of over-fitting and indirectly augments the ability of the underlying encoder. Experiments on the FluentAI dataset show that the cross-language transfer learning and multi-task strategies improve performance by up to 4.52% and 3.89% respectively, compared to the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Judith Gaspers|AUTHOR Judith Gaspers]], [[Quynh Do|AUTHOR Quynh Do]], [[Fabian Triefenbach|AUTHOR Fabian Triefenbach]]
</p><p class="cpabstractcardaffiliationlist">Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1560–1564&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite the fact that data imbalance is becoming more and more common in real-world Spoken Language Understanding (SLU) applications, it has not been studied extensively in the literature. To the best of our knowledge, this paper presents the first systematic study on handling data imbalance for SLU. In particular, we discuss the application of existing data balancing techniques for SLU and propose a multi-task SLU model for intent classification and slot filling. Aiming to avoid over-fitting, our model leverages data balancing methods indirectly via an auxiliary task which makes use of a class-balanced batch generator and (possibly) synthetic data. Our results on a real-world dataset indicate that i) our proposed model can boost performance on low-frequency intents significantly while avoiding a potential performance decrease on the head intents, ii) synthetic data are beneficial for bootstrapping new intents when realistic data are not available, but iii) once a certain amount of realistic data becomes available, using synthetic data in the auxiliary task only yields better performance than adding them to the primary task training data, and iv) in a joint training scenario, balancing the intent distribution individually improves not only intent classification but also slot filling performance.</p></div>
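The auxiliary task relies on a class-balanced batch generator; the sketch below shows one minimal way to build such a generator (sampling intents uniformly rather than in proportion to their frequency). The function name and data layout are hypothetical, for illustration only.

```python
# Hedged sketch: batches that sample each intent class roughly equally often.
import random
from collections import defaultdict

def class_balanced_batches(examples, labels, batch_size, n_batches):
    by_class = defaultdict(list)
    for x, y in zip(examples, labels):
        by_class[y].append(x)
    classes = list(by_class)
    for _ in range(n_batches):
        batch = []
        for _ in range(batch_size):
            c = random.choice(classes)          # uniform over classes, not examples
            batch.append((random.choice(by_class[c]), c))
        yield batch

demo = list(class_balanced_batches(list(range(100)),
                                   [i % 5 for i in range(100)], 8, 2))
print(demo[0])
```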
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu Wang|AUTHOR Yu Wang]], [[Yilin Shen|AUTHOR Yilin Shen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]
</p><p class="cpabstractcardaffiliationlist">Samsung, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1565–1569&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most of the existing spoken language understanding systems can perform only semantic frame parsing based on a single-round user query. They cannot take users’ feedback to update/add/remove slot values through multiround interactions with users. In this paper, we introduce a novel interactive adversarial reward learning-based spoken language understanding system that can leverage the multiround users’ feedback to update slot values. We perform two experiments on the benchmark ATIS dataset and demonstrate that the new system can improve parsing performance by at least 2.5% in terms of F1, with only one round of feedback. The improvement becomes even larger when the number of feedback rounds increases. Furthermore, we also compare the new system with state-of-the-art dialogue state tracking systems and demonstrate that the new interactive system can perform better on multiround spoken language understanding tasks in terms of slot- and sentence-level accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jin Cao|AUTHOR Jin Cao]], [[Jun Wang|AUTHOR Jun Wang]], [[Wael Hamza|AUTHOR Wael Hamza]], [[Kelly Vanee|AUTHOR Kelly Vanee]], [[Shang-Wen Li|AUTHOR Shang-Wen Li]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1570–1574&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural models have yielded state-of-the-art results in deciphering spoken language understanding (SLU) problems; however, these models require a significant amount of domain-specific labeled examples for training, which is prohibitively expensive. While pre-trained language models like BERT have been shown to capture a massive amount of knowledge by learning from unlabeled corpora and solve SLU using fewer labeled examples for adaption, the encoding of knowledge is implicit and agnostic to downstream tasks. Such encoding results in model inefficiencies in parameter usage: an entirely new model is required for every domain. To address these challenges, we introduce a novel SLU framework, comprising a conversational language modeling (CLM) pre-training task and a light encoder architecture. The CLM pre-training enables networks to capture the representation of the language in conversation style with the presence of ASR errors. The light encoder architecture separates the shared pre-trained networks from the mappings of generally encoded knowledge to specific domains of SLU, allowing for the domain adaptation to be performed solely at the light encoder and thus increasing efficiency. With the framework, we match the performance of state-of-the-art SLU results on Alexa internal datasets and on two public ones (ATIS, SNIPS), adding only 4.4% parameters per task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shota Orihashi|AUTHOR Shota Orihashi]], [[Mana Ihori|AUTHOR Mana Ihori]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1575–1579&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel unsupervised domain adaptation method for dialogue sequence labeling. Dialogue sequence labeling is a supervised learning task that estimates labels for each utterance in the given dialogue document, and is useful for many applications such as topic segmentation and dialogue act estimation. Accurate labeling often requires a large amount of labeled training data, but it is difficult to collect such data every time we need to support a new domain, such as contact centers in a new business field. In order to solve this difficulty, we propose an unsupervised domain adaptation method for dialogue sequence labeling. Our key idea is to construct dialogue sequence labeling using labeled source domain data and unlabeled target domain data so as to remove domain dependencies at utterance-level and dialogue-level contexts. The proposed method adopts hierarchical adversarial training; two domain adversarial networks, an utterance-level context independent network and a dialogue-level context dependent network, are introduced for improving domain invariance in the dialogue sequence labeling. Experiments on Japanese simulated contact center dialogue datasets demonstrate the effectiveness of the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Leda Sarı|AUTHOR Leda Sarı]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1580–1584&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding (SLU) datasets, like many other machine learning datasets, usually suffer from the label imbalance problem. Label imbalance usually causes the learned model to replicate similar biases at the output which raises the issue of unfairness to the minority classes in the dataset. In this work, we approach the fairness problem by maximizing the F-measure instead of accuracy in neural network model training. We propose a differentiable approximation to the F-measure and train the network with this objective using standard back-propagation. We perform experiments on two standard fairness datasets, Adult, and Communities and Crime, and also on speech-to-intent detection on the ATIS dataset and speech-to-image concept classification on the Speech-COCO dataset. In all four of these tasks, F-measure maximization results in improved micro-F1 scores, with absolute improvements of up to 8% absolute, as compared to models trained with the cross-entropy loss function. In the two multi-class SLU tasks, the proposed approach significantly improves class coverage, i.e., the number of classes with positive recall.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Taesun Whang|AUTHOR Taesun Whang]]^^1^^, [[Dongyub Lee|AUTHOR Dongyub Lee]]^^2^^, [[Chanhee Lee|AUTHOR Chanhee Lee]]^^3^^, [[Kisu Yang|AUTHOR Kisu Yang]]^^3^^, [[Dongsuk Oh|AUTHOR Dongsuk Oh]]^^3^^, [[Heuiseok Lim|AUTHOR Heuiseok Lim]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Wisenut, Korea; ^^2^^Kakao, Korea; ^^3^^Korea University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1585–1589&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We focus on multi-turn response selection in a retrieval-based dialog system. In this paper, we utilize the powerful pre-trained language model Bi-directional Encoder Representations from Transformer (BERT) for a multi-turn dialog system and propose a highly effective post-training method on domain-specific corpus. Although BERT is easily adopted to various NLP tasks and outperforms previous baselines of each task, it still has limitations if a task corpus is too focused on a certain domain. Post-training on domain-specific corpus (e.g., Ubuntu Corpus) helps the model to train contextualized representations and words that do not appear in general corpus (e.g., English Wikipedia). Experimental results show that our approach achieves new state-of-the-art on two response selection benchmarks (i.e., Ubuntu Corpus V1, Advising Corpus) performance improvement by 5.9% and 6% on R,,10,,@1.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Grant L. McGuire|AUTHOR Grant L. McGuire]]^^1^^, [[Molly Babel|AUTHOR Molly Babel]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at Santa Cruz, USA; ^^2^^University of British Columbia, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1595–1599&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In an exposure phase, two groups of listeners were exposed to a set of 10 voices. These groups differed in terms of the task assigned during exposure: one group was asked to make a decision about the regional affiliation of the voices (Indexical Condition), while the other group orthographically transcribed the words presented (Lexical Condition). Both groups were given an identical test phase where they were presented with 20 voices (10 old, 10 new) and asked to make old/new decisions on the voices. While both groups of listeners performed at above chance accuracy levels in recognizing voices at test as old/new, listeners in the Indexical Condition performed more accurately. These results suggest that the nature of attention during exposure has consequences for subsequent performance, suggesting encoding differences as a result of task demands.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hassan Salami Kavaki|AUTHOR Hassan Salami Kavaki]], [[Michael I. Mandel|AUTHOR Michael I. Mandel]]
</p><p class="cpabstractcardaffiliationlist">CUNY Graduate Center, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1639–1643&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human listeners use specific cues to recognize speech and recent experiments have shown that certain time-frequency regions of individual utterances are more important to their correct identification than others. A model that could identify such cues or regions from clean speech would facilitate speech recognition and speech enhancement by focusing on those important regions. Thus, in this paper we present a model that can predict the regions of individual utterances that are important to an automatic speech recognition (ASR) “listener” by learning to add as much noise as possible to these utterances while still permitting the ASR to correctly identify them. This work utilizes a continuous speech recognizer to recognize multi-word utterances and builds upon our previous work that performed the same process for an isolated word recognizer. Our experimental results indicate that our model can apply noise to obscure 90.5% of the spectrogram while leaving recognition performance nearly unchanged.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anaïs Tran Ngoc|AUTHOR Anaïs Tran Ngoc]]^^1^^, [[Julien Meyer|AUTHOR Julien Meyer]]^^2^^, [[Fanny Meunier|AUTHOR Fanny Meunier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BCL (UMR 7320), France; ^^2^^GIPSA-lab (UMR 5216), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1600–1604&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Whistled speech is a form of modified speech where some frequencies of vowels and consonants are augmented and transposed to whistling, modifying the timbre and the construction of each phoneme. These transformations cause only some elements of the signal to be intelligible for naive listeners, which, according to previous studies, includes vowel recognition. Here, we analyze naive listeners’ capacities for whistled consonant categorization for four consonants: /p/, /k/, /t/ and /s/ by presenting the findings of two behavioral experiments. Though both experiments measure whistled consonant categorization, we used modified frequencies — lowered with a phase vocoder — of the whistled stimuli in the second experiment to better identify the relative nature of pitch cues employed in this process. Results show that participants obtained approximately 50% of correct responses (when chance is at 25%). These findings show specific consonant preferences for “s” and “t” over “k” and “p”, specifically when stimuli is unmodified. Previous research on whistled consonants systems has often opposed “s” and “t” to “k” and “p”, due to their strong pitch modulations. The preference for these two consonants underlines the importance of these cues in phoneme processing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anaïs Tran Ngoc|AUTHOR Anaïs Tran Ngoc]]^^1^^, [[Julien Meyer|AUTHOR Julien Meyer]]^^2^^, [[Fanny Meunier|AUTHOR Fanny Meunier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BCL (UMR 7320), France; ^^2^^GIPSA-lab (UMR 5216), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1605–1609&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we analyzed whistled vowel categorization by native French listeners. Whistled speech, a natural, yet modified register of speech, is used here as a tool to investigate perceptual processes in languages. We focused on four whistled vowels: /i, e, a, o/. After a detailed description of the vowels, we built and ran a behavioral experiment in which we asked native French speakers to categorize whistled vowel stimuli in which we introduced intra- and inter- production variations. In addition, half of the participants performed the experiment in person (at the laboratory) while the other half participated online, allowing us to evaluate the impact of the testing set up. Our results confirm that the categorization rate of whistled vowels is above chance. They reveal significant differences in performance for different vowels and suggest an influence of certain acoustic parameters from the whistlers’ vowel range on categorization. Moreover, no effect or interaction was found for testing location and circumstances in our data set. This study confirms that whistled stimuli are a useful tool for studying how listeners process modified speech and which parameters impact sound categorization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maria del Mar Cordero|AUTHOR Maria del Mar Cordero]]^^1^^, [[Fanny Meunier|AUTHOR Fanny Meunier]]^^1^^, [[Nicolas Grimault|AUTHOR Nicolas Grimault]]^^2^^, [[Stéphane Pota|AUTHOR Stéphane Pota]]^^3^^, [[Elsa Spinelli|AUTHOR Elsa Spinelli]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BCL (UMR 7320), France; ^^2^^Université Claude-Bernard Lyon 1, France; ^^3^^LPNC (UMR 5105), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1610–1614&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper evaluates the use of intonational cues during word segmentation in French. Specifically, we aim to examine how the characteristics of the fundamental frequency (F0) that can be observed at the beginning of words influence their processing. Native speakers of French were presented with phonemically identical sequences, such as /selami/ (//c’est l’amie/la mie// “it’s the friend/the crumb”). To test which properties of the F0 affect the perceived segmentation, we manipulated the F0 slope and/or the mean value of the first vowel /a/ in consonant-initial items (e.g., //l__a__ mie//). To assess differences in off-line vs online processing, we used a two-alternative, forced-choice task in Experiment 1 and a lexical decision task in Experiment 2. A previous study showed that vowel-initial segmentation was enhanced when the F0 mean value increased. However, the present study shows that modifying the F0 slope while keeping the F0 mean value constant also influences speech segmentation in both off-line and online tasks. This suggests that listeners use the F0 slope as a cue at the beginning of content words.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amandine Michelas|AUTHOR Amandine Michelas]], [[Sophie Dufour|AUTHOR Sophie Dufour]]
</p><p class="cpabstractcardaffiliationlist">LPL (UMR 7309), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1615–1619&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In two long-term repetition priming experiments, we investigated how accentual information is processed and represented in the French listeners’ mind. Repeated prime and target words either matched (/bãˈ''do''/ - / bãˈ''do''/ ‘headband’) or mismatched in their accentual patterns (/bãdo/ - /bãˈ''do''/). In experiment 1, the target words were presented in the left ear only, and attenuation in the repetition priming effect was observed when the primes and the targets mismatched in their accentual pattern. The differential priming effect between match and mismatch primes was no longer observed in Experiment 2 when the targets were presented in the right ear only. Together, these results showed that accentual variation at the word level in French is treated as related-talker variation, and only influences word recognition under specific circumstances, in particular, when we push word processing in the right hemisphere.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wen Liu|AUTHOR Wen Liu]]
</p><p class="cpabstractcardaffiliationlist">Shandong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1620–1623&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous studies have shown that the perception is categorical when tones have different contours, whereas continuous when tones have the same contour. In this study, a perceptual experiment of the five level tones in Hmu (Xinzhai variety) was conducted to further examine this conclusion. Results show that in the identification test, continua between different level tones have different boundary width, which has a negative correlation with the pitch interval of two level tones. In the discrimination test, though there is no peak in discrimination curve, the discrimination accuracy reveals an important phenomenon that the accuracy is approximately 50% between two neighboring level tones, but higher when the level tones have a larger pitch interval. Besides, the boundary width is highly correlated with the discrimination accuracy (e.g., the narrower the boundary width, the higher the discrimination accuracy). These results reveal the basic characteristic of continuous perception, especially for level tones. Finally, the results also demonstrate that the category in categorical perception is not equal to phonological category.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhen Zeng|AUTHOR Zhen Zeng]]^^1^^, [[Karen Mattock|AUTHOR Karen Mattock]]^^1^^, [[Liquan Liu|AUTHOR Liquan Liu]]^^1^^, [[Varghese Peter|AUTHOR Varghese Peter]]^^1^^, [[Alba Tuninetti|AUTHOR Alba Tuninetti]]^^1^^, [[Feng-Ming Tsao|AUTHOR Feng-Ming Tsao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Western Sydney University, Australia; ^^2^^National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1624–1628&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Listeners segment speech based on the rhythm of their native language(s) (e.g., stress- vs. syllable-timed, tone vs. non-tone) [1,2]. In English, the perception of speech rhythm relies on analyzing auditory cues pertinent to lexical stress, including pitch, duration and intensity [3]. Focusing on cross-linguistic impact on English lexical stress cue processing, the present study aims to explore English stress cue-weighting by Mandarin-speaking adults (with English adults as control), using an MMN multi-feature paradigm.

Preliminary ERP data revealed cross-linguistic perceptual differences to pitch and duration cues, but not to intensity cues in the bisyllabic non-word /dede/. Specifically, while English adults were similarly sensitive to pitch change at the initial and final syllable of the non-word, they were more sensitive to the duration change at the initial syllable. Comparatively, Mandarin adults were similarly sensitive to duration change at each position, but more sensitive to pitch at the final syllable. Lastly, both the Mandarin group and the English group were more sensitive to the intensity sound change at the second syllable. Possible explanations for these findings are discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yan Feng|AUTHOR Yan Feng]], [[Gang Peng|AUTHOR Gang Peng]], [[William Shi-Yuan Wang|AUTHOR William Shi-Yuan Wang]]
</p><p class="cpabstractcardaffiliationlist">PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1629–1633&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study examined age-related differences in categorical perception of Mandarin lexical tones through comparing identification and discrimination performance among young adults, seniors aged 60–65 years, and older seniors aged 75–80 years. Results showed a significantly wider boundary and smaller peakedness in older seniors. There was also a positive correlation between the hearing level at 125 Hz and boundary width, and a negative correlation between hearing level (125 Hz) and peakedness in older seniors, indicating that the decline of tone perception in this population might be associated with degradation of hearing sensitivity. However, there was no significant difference between young adults and seniors aged 60–65 years, which might reveal that younger seniors could maintain normal ability to perceive tones categorically.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Georgia Zellou|AUTHOR Georgia Zellou]], [[Michelle Cohn|AUTHOR Michelle Cohn]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1634–1638&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Increasingly, people are having conversational interactions with voice-AI systems, such as Amazon’s Alexa. Do the same social and functional pressures that mediate alignment toward human interlocutors also predict align patterns toward voice-AI? We designed an interactive dialogue task to investigate this question. Each trial consisted of scripted, interactive turns between a participant and a model talker (pre-recorded from either a natural production or voice-AI): First, participants produced target words in a carrier phrase. Then, a model talker responded with an utterance containing the target word. The interlocutor responses varied by 1) communicative affect (social) and 2) correctness (functional). Finally, participants repeated the carrier phrase. Degree of phonetic alignment was assessed acoustically between the target word in the model’s response and participants’ response. Results indicate that social and functional factors distinctly mediate alignment toward AI and humans. Findings are discussed with reference to theories of alignment and human-computer interaction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1644–1648&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we investigate the usefulness of the sign spectrum and its combination with the raw magnitude spectrum in acoustic modelling for automatic speech recognition (ASR). The sign spectrum is a sequence of ±1s, capturing one bit of the phase spectrum. It encodes information overlooked by the magnitude spectrum enabling unique signal characterisation and reconstruction. In particular, we demonstrate it carries information related to the temporal structure of the signal as well as the speech’s source component. Furthermore, we investigate the usefulness of combining it with the raw magnitude spectrum via multi-head CNNs at different fusion levels for ASR. While information-wise these two streams of information are together equivalent to the raw waveform signal the overall performance is noticeably higher than raw waveform and classic features such as MFCC and filterbank. This has been observed and verified in TIMIT, NTIMT, Aurora-4 and WSJ tasks and up to 14.5% relative WER reduction has been achieved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anurenjan Purushothaman|AUTHOR Anurenjan Purushothaman]], [[Anirudh Sreeram|AUTHOR Anirudh Sreeram]], [[Rohit Kumar|AUTHOR Rohit Kumar]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1688–1692&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition in reverberant conditions is a challenging task as the long-term envelopes of the reverberant speech are temporally smeared. In this paper, we propose a neural model for enhancement of sub-band temporal envelopes for dereverberation of speech. The temporal envelopes are derived using the autoregressive modeling framework of frequency domain linear prediction (FDLP). The neural enhancement model proposed in this paper performs an envelop gain based enhancement of temporal envelopes and it consists of a series of convolutional and recurrent neural network layers. The enhanced sub-band envelopes are used to generate features for automatic speech recognition (ASR). The ASR experiments are performed on the REVERB challenge dataset as well as the CHiME-3 dataset. In these experiments, the proposed neural enhancement approach provides significant improvements over a baseline ASR system with beamformed audio (average relative improvements of 21% on the development set and about 11% on the evaluation set in word error rates for REVERB challenge dataset).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Purvi Agrawal|AUTHOR Purvi Agrawal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1649–1653&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition in noisy and channel distorted scenarios is often challenging as the current acoustic modeling schemes are not adaptive to the changes in the signal distribution in the presence of noise. In this work, we develop a novel acoustic modeling framework for noise robust speech recognition based on relevance weighting mechanism. The relevance weighting is achieved using a sub-network approach that performs feature selection. A relevance sub-network is applied on the output of first layer of a convolutional network model operating on raw speech signals while a second relevance sub-network is applied on the second convolutional layer output. The relevance weights for the first layer correspond to an acoustic filterbank selection while the relevance weights in the second layer perform modulation filter selection. The model is trained for a speech recognition task on noisy and reverberant speech. The speech recognition experiments on multiple datasets (Aurora-4, CHiME-3, VOiCES) reveal that the incorporation of relevance weighting in the neural network architecture improves the speech recognition word error rates significantly (average relative improvements of 10% over the baseline systems).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dino Oglic|AUTHOR Dino Oglic]]^^1^^, [[Zoran Cvetkovic|AUTHOR Zoran Cvetkovic]]^^1^^, [[Peter Bell|AUTHOR Peter Bell]]^^2^^, [[Steve Renals|AUTHOR Steve Renals]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^King’s College London, UK; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1654–1658&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Due to limited computational resources, acoustic models of early automatic speech recognition (ASR) systems were built in low-dimensional feature spaces that incur considerable information loss at the outset of the process. Several comparative studies of automatic and human speech recognition suggest that this information loss can adversely affect the robustness of ASR systems. To mitigate that and allow for learning of robust models, we propose a deep 2D convolutional network in the waveform domain. The first layer of the network decomposes waveforms into frequency sub-bands, thereby representing them in a structured high-dimensional space. This is achieved by means of a parametric convolutional block defined via cosine modulations of compactly supported windows. The next layer embeds the waveform in an even higher-dimensional space of high-resolution spectro-temporal patterns, implemented via a 2D convolutional block. This is followed by a gradual compression phase that selects most relevant spectro-temporal patterns using wide-pass 2D filtering. Our results show that the approach significantly outperforms alternative waveform-based models on both noisy and spontaneous conversational speech (24% and 11% relative error reduction, respectively). Moreover, this study provides empirical evidence that learning directly from the waveform domain could be more effective than learning using hand-crafted features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ludwig Kürzinger|AUTHOR Ludwig Kürzinger]], [[Nicolas Lindae|AUTHOR Nicolas Lindae]], [[Palle Klewitz|AUTHOR Palle Klewitz]], [[Gerhard Rigoll|AUTHOR Gerhard Rigoll]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1659–1663&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many end-to-end Automatic Speech Recognition (ASR) systems still rely on pre-processed frequency-domain features that are handcrafted to emulate the human hearing. Our work is motivated by recent advances in integrated learnable feature extraction. For this, we propose Lightweight Sinc-Convolutions (LSC) that integrate Sinc-convolutions with depthwise convolutions as a low-parameter machine-learnable feature extraction for end-to-end ASR systems.

We integrated LSC into the hybrid CTC/attention architecture for evaluation. The resulting end-to-end model shows smooth convergence behaviour that is further improved by applying SpecAugment in the time domain. We also discuss filter-level improvements, such as using log-compression as activation function. Our model achieves a word error rate of 10.7% on the TEDlium v2 test dataset, surpassing the corresponding architecture with log-mel filterbank features by an absolute 1.9%, but only has 21% of its model size.</p></div>
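The Sinc-convolution part of such a front end is a windowed-sinc band-pass filter whose only learnable quantities are its two cut-off frequencies; the rest of the kernel follows from the closed form below. This is a minimal NumPy sketch of that kernel (kernel size and Hamming window are assumptions); the paper's LSC additionally combines such filters with depthwise convolutions, which is not shown here.

```python
import numpy as np

def sinc_bandpass_kernel(f_low, f_high, sr, kernel_size=129):
    """Windowed-sinc band-pass impulse response: the band-pass is the
    difference of two ideal low-pass filters, tapered by a Hamming
    window to reduce ripple.  f_low and f_high are the (learnable)
    cut-off frequencies in Hz."""
    t = np.arange(kernel_size) - (kernel_size - 1) / 2.0

    def lowpass(fc):
        # ideal low-pass impulse response at cut-off fc (Hz)
        return 2.0 * fc / sr * np.sinc(2.0 * fc * t / sr)

    h = lowpass(f_high) - lowpass(f_low)
    h *= np.hamming(kernel_size)
    return h
```

Convolving the waveform with one such kernel per learned band gives the first feature-extraction layer of a Sinc-convolution front end.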
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pegah Ghahramani|AUTHOR Pegah Ghahramani]]^^1^^, [[Hossein Hadian|AUTHOR Hossein Hadian]]^^2^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^3^^, [[Hynek Hermansky|AUTHOR Hynek Hermansky]]^^1^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^Sharif University of Technology, Iran; ^^3^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1664–1667&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Mel scale is the most commonly used frequency warping function to extract features for automatic speech recognition (ASR) and is known to be quite effective. However, it is not specifically designed for ASR acoustic models based on deep neural networks (DNN). In this study, we introduce a frequency warping function which is a modified version of Mel scale. This warping function is parameterized using 2 parameters and we use it to propose a new set of features called modified Mel-frequency cepstral coefficients (MFCC), which use cosine-shaped filters. The bandwidths are computed using a new function. By evaluating the proposed features on a variety of ASR data sets, we see consistent improvements over regular MFCCs and (log) Mel filter bank energies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anirban Dutta|AUTHOR Anirban Dutta]], [[G. Ashishkumar|AUTHOR G. Ashishkumar]], [[Ch.V. Rama Rao|AUTHOR Ch.V. Rama Rao]]
</p><p class="cpabstractcardaffiliationlist">NIT Meghalaya, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1668–1672&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spectro-temporal feature extraction has shown its robustness in the field of speech recognition. However, these features are derived from magnitude spectrum of the complex Fourier Transform (FT). In this work, we investigate to see if phase information can substitute magnitude based spectro-temporal features. We compared with different state of art phase spectrum and evaluated its performance. The experiments are carried out in different noisy environments. We found Modified Group Delay (MODGD) spectrum to closely resemble the structure of power spectrum. A relative performance difference of 0.03% on average is observed for the MODGD spectro-temporal features compared to the magnitude based features. The analysis showed that phase can indeed carry equivalent or complementary information to magnitude based spectro-temporal features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Neethu M. Joy|AUTHOR Neethu M. Joy]]^^1^^, [[Dino Oglic|AUTHOR Dino Oglic]]^^1^^, [[Zoran Cvetkovic|AUTHOR Zoran Cvetkovic]]^^1^^, [[Peter Bell|AUTHOR Peter Bell]]^^2^^, [[Steve Renals|AUTHOR Steve Renals]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^King’s College London, UK; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1673–1677&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep scattering spectrum consists of a cascade of wavelet transforms and modulus non-linearity. It generates features of different orders, with the first order coefficients approximately equal to the Mel-frequency cepstrum, and higher order coefficients recovering information lost at lower levels. We investigate the effect of including the information recovered by higher order coefficients on the robustness of speech recognition. To that end, we also propose a modification to the original scattering transform tailored for noisy speech. In particular, instead of the modulus non-linearity we opt to work with power coefficients and, therefore, use the squared modulus non-linearity. We quantify the robustness of scattering features using the word error rates of acoustic models trained on clean speech and evaluated using sets of utterances corrupted with different noise types. Our empirical results show that the second order scattering power spectrum coefficients capture invariants relevant for noise robustness and that this additional information improves generalization to unseen noise conditions (almost 20% relative error reduction on AURORA 4). This finding can have important consequences on speech recognition systems that typically discard the second order information and keep only the first order features (known for emulating MFCC and FBANK values) when representing speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Titouan Parcollet|AUTHOR Titouan Parcollet]], [[Xinchi Qiu|AUTHOR Xinchi Qiu]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]
</p><p class="cpabstractcardaffiliationlist">University of Oxford, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1678–1682&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Distant speech recognition remains a challenging application for modern deep learning based Automatic Speech Recognition (ASR) systems, due to complex recording conditions involving noise and reverberation. Multiple microphones are commonly combined with well-known speech processing techniques to enhance the original signals and thus enhance the speech recognizer performance. These multi-channel follow similar input distributions with respect to the global speech information but also contain an important part of noise. Consequently, the input representation robustness is key to obtaining reasonable recognition rates. In this work, we propose a Fusion Layer (FL) based on shared neural parameters. We use it to produce an expressive embedding of multiple microphone signals, that can easily be combined with any existing ASR pipeline. The proposed model called FusionRNN showed promising results on a multi-channel distant speech recognition task, and consistently outperformed baseline models while maintaining an equal training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Bo Ren|AUTHOR Bo Ren]], [[Yifan Gong|AUTHOR Yifan Gong]], [[Jian Wu|AUTHOR Jian Wu]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1683–1687&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Data Simulation is a crucial technique for robust automatic speech recognition (ASR) systems. We develop this work in the scope of data augmentation and improve robustness by generating new bandpass noise resources from an existing noise corpus. We design numerous bandpass filters with varying center frequencies and filter bandwidths, and obtain corresponding bandpass noise samples. We augment our baseline data simulation with bandpass noises to ingest additional robustness and generalization to generic and unknown acoustic scenarios. This work targets ASR robustness to individual subband noises, and improves robustness to unseen real-world noise that can be approximated as a factorial combination of subband noises. We demonstrate our work for a large scale unified ASR task. We obtained 7% word error rate relative reduction (WERR) across unseen acoustic conditions and 11% WERR for kids speech. We also demonstrate generalization to new ASR applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kyubyong Park|AUTHOR Kyubyong Park]]^^1^^, [[Seanie Lee|AUTHOR Seanie Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Kakao, Korea; ^^2^^KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1723–1727&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conversion of Chinese graphemes to phonemes (G2P) is an essential component in Mandarin Chinese Text-To-Speech (TTS) systems. One of the biggest challenges in Chinese G2P conversion is how to disambiguate the pronunciation of polyphones — characters having multiple pronunciations. Although many academic efforts have been made to address it, there has been no open dataset that can serve as a standard benchmark for a fair comparison to date. In addition, most of the reported systems are hard to employ for researchers or practitioners who want to convert Chinese text into pinyin at their convenience. Motivated by these, in this work, we introduce a new benchmark dataset that consists of 99,000+ sentences for Chinese polyphone disambiguation. We train a simple Bi-LSTM model on it and find that it outperforms other pre-existing G2P systems and slightly underperforms pre-trained Chinese BERT. Finally, we package our project and share it on PyPi.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haiteng Zhang|AUTHOR Haiteng Zhang]], [[Huashan Pan|AUTHOR Huashan Pan]], [[Xiulin Li|AUTHOR Xiulin Li]]
</p><p class="cpabstractcardaffiliationlist">DataBaker Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1728–1732&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Polyphone disambiguation serves as an essential part of Mandarin text-to-speech (TTS) system. However, conventional system modelling the entire Pinyin set causes the case that prediction belongs to the unrelated polyphonic character instead of the current input one, which has negative impacts on TTS performance. To address this issue, we introduce a mask-based model for polyphone disambiguation. The model takes a mask vector extracted from the context as an extra input. In our model, the mask vector not only acts as a weighting factor in Weighted-softmax to prevent the case of mis-prediction but also eliminates the contribution of non-candidate set to the overall loss. Moreover, to mitigate the uneven distribution of pronunciation, we introduce a new loss called Modified Focal Loss. The experimental result shows the effectiveness of the proposed mask-based model. We also empirically studied the impact of Weighted-softmax and Modified Focal Loss. It was found that Weighted-softmax can effectively prevent the model from predicting outside the candidate set. Besides, Modified Focal Loss can reduce the adverse impacts of the uneven distribution of pronunciation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1733–1737&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study tests speech-in-noise perception and social ratings of speech produced by different text-to-speech (TTS) synthesis methods. We used identical speaker training datasets for a set of 4 voices (using AWS Polly TTS), generated using neural and concatenative TTS. In Experiment 1, listeners identified target words in semantically predictable and unpredictable sentences in concatenative and neural TTS at two noise levels (-3 dB, -6 dB SNR). Correct word identification was lower for neural TTS than for concatenative TTS, in the lower SNR, and for semantically unpredictable sentences. In Experiment 2, listeners rated the voices on 4 social attributes. Neural TTS was rated as more human-like, natural, likeable, and familiar than concatenative TTS. Furthermore, how natural listeners rated the neural TTS voice was positively related to their speech-in-noise accuracy. Together, these findings show that the TTS method influences both intelligibility and social judgments of speech — and that these patterns are linked. Overall, this work contributes to our understanding of the nexus of speech technology and human speech perception.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jason Taylor|AUTHOR Jason Taylor]], [[Korin Richmond|AUTHOR Korin Richmond]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1738–1742&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural sequence-to-sequence (S2S) modelling encodes a single, unified representation for each input sequence. When used for text-to-speech synthesis (TTS), such representations must embed ambiguities between English spelling and pronunciation. For example, in //pothole// and //there// the character sequence //th// sounds different. This can be problematic when predicting pronunciation directly from letters. We posit pronunciation becomes easier to predict when letters are grouped into sub-word units like morphemes (e.g. a boundary lies between //t// and //h// in //pothole// but not //there//). Moreover, morphological boundaries can reduce the total number of, and increase the counts of, seen unit subsequences. Accordingly, we test here the effect of augmenting input sequences of letters with morphological boundaries. We find morphological boundaries substantially lower the Word and Phone Error Rates (WER and PER) for a Bi-LSTM performing G2P on one hand, and also increase the naturalness scores of Tacotrons performing TTS in a MUSHRA listening test on the other. The improvements to TTS quality are such that grapheme input augmented with morphological boundaries outperforms phone input without boundaries. Since morphological segmentation may be predicted with high accuracy, we highlight this simple pre-processing step has important potential for S2S modelling in TTS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yeunju Choi|AUTHOR Yeunju Choi]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1743–1747&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While deep learning has made impressive progress in speech synthesis and voice conversion, the assessment of the synthesized speech is still carried out by human participants. Several recent papers have proposed deep-learning-based assessment models and shown the potential to automate the speech quality assessment. To improve the previously proposed assessment model, MOSNet, we propose three models using cluster-based modeling methods: using a global quality token (GQT) layer, using an Encoding Layer, and using both of them. We perform experiments using the evaluation results of the Voice Conversion Challenge 2018 to predict the mean opinion score of synthesized speech and similarity score between synthesized speech and reference speech. The results show that the GQT layer helps to predict human assessment better by automatically learning the useful quality tokens for the task and that the Encoding Layer helps to utilize frame-level scores more precisely.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Sebastian Möller|AUTHOR Sebastian Möller]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Berlin, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1748–1752&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a new objective prediction model for synthetic speech naturalness. It can be used to evaluate Text-To-Speech or Voice Conversion systems and works language independently. The model is trained end-to-end and based on a CNN-LSTM network that previously showed to give good results for speech quality estimation. We trained and tested the model on 16 different datasets, such as from the Blizzard Challenge and the Voice Conversion Challenge. Further, we show that the reliability of deep learning-based naturalness prediction can be improved by transfer learning from speech quality prediction models that are trained on objective POLQA scores. The proposed model is made publicly available and can, for example, be used to evaluate different TTS system configurations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiawen Zhang|AUTHOR Jiawen Zhang]]^^1^^, [[Yuanyuan Zhao|AUTHOR Yuanyuan Zhao]]^^2^^, [[Jiaqi Zhu|AUTHOR Jiaqi Zhu]]^^1^^, [[Jinba Xiao|AUTHOR Jinba Xiao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Kwai, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1753–1757&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Grapheme-to-phoneme (G2P) conversion plays an important role in building a Mandarin Chinese text-to-speech (TTS) system, where the polyphone disambiguation is an indispensable task. However, most of the previous polyphone disambiguation models are trained on manually annotated datasets, which are suffering from data scarcity, narrow coverage, and unbalanced data distribution. In this paper, we propose a framework that can predict the pronunciations of Chinese characters, and the core model is trained in a distantly supervised way. Specifically, we utilize the alignment procedure used for acoustic models to produce abundant character-phoneme sequence pairs, which are employed to train a Seq2Seq model with attention mechanism. We also make use of a language model that is trained on phoneme sequences to alleviate the impact of noises in the auto-generated dataset. Experimental results demonstrate that even without additional syntactic features and pre-trained embeddings, our approach achieves competitive prediction results, and especially improves the predictive accuracy for unbalanced polyphonic characters. In addition, compared with the manually annotated training datasets, the auto-generated one is more diversified and makes the results more consistent with the pronunciation habits of most people.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pilar Oplustil Gallegos|AUTHOR Pilar Oplustil Gallegos]], [[Jennifer Williams|AUTHOR Jennifer Williams]], [[Joanna Rownicka|AUTHOR Joanna Rownicka]], [[Simon King|AUTHOR Simon King]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1758–1762&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Large multi-speaker datasets for TTS typically contain diverse speakers, recording conditions, styles and quality of data. Although one might generally presume that more data is better, in this paper we show that a model trained on a carefully-chosen subset of speakers from LibriTTS provides significantly better quality synthetic speech than a model trained on a larger set. We propose an unsupervised methodology to find this subset by clustering per-speaker acoustic representations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anurag Das|AUTHOR Anurag Das]]^^1^^, [[Guanlong Zhao|AUTHOR Guanlong Zhao]]^^1^^, [[John Levis|AUTHOR John Levis]]^^2^^, [[Evgeny Chukharev-Hudilainen|AUTHOR Evgeny Chukharev-Hudilainen]]^^2^^, [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Texas A&M University, USA; ^^2^^Iowa State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1763–1767&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a methodology to study the role of non-native accents on talker recognition by humans. The methodology combines a state-of-the-art accent-conversion system to resynthesize the voice of a speaker with a different accent of her/his own, and a protocol for perceptual listening tests to measure the relative contribution of accent and voice quality on speaker similarity. Using a corpus of non-native and native speakers, we generated accent conversions in two different directions: non-native speakers with native accents, and native speakers with non-native accents. Then, we asked listeners to rate the similarity between 50 pairs of real or synthesized speakers. Using a linear mixed effects model, we find that (for our corpus) the effect of voice quality is five times as large as that of non-native accent, and that the effect goes away when speakers share the same (native) accent. We discuss the potential significance of this work in earwitness identification and sociophonetics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Zhou|AUTHOR Wei Zhou]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1768–1772&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As one popular modeling approach for end-to-end speech recognition, attention-based encoder-decoder models are known to suffer the length bias and corresponding beam problem. Different approaches have been applied in simple beam search to ease the problem, most of which are heuristic-based and require considerable tuning. We show that heuristics are not proper modeling refinement, which results in severe performance degradation with largely increased beam sizes. We propose a novel beam search derived from reinterpreting the sequence posterior with an explicit length modeling. By applying the reinterpreted probability together with beam pruning, the obtained final probability leads to a robust model modification, which allows reliable comparison among output sequences of different lengths. Experimental verification on the LibriSpeech corpus shows that the proposed approach solves the length bias problem without heuristics or additional tuning effort. It provides robust decision making and consistently good performance under both small and very large beam sizes. Compared with the best results of the heuristic baseline, the proposed approach achieves the same WER on the ‘clean’ sets and 4% relative improvement on the ‘other’ sets. We also show that it is more efficient with the additional derived early stopping criterion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xi Chen|AUTHOR Xi Chen]]^^1^^, [[Songyang Zhang|AUTHOR Songyang Zhang]]^^2^^, [[Dandan Song|AUTHOR Dandan Song]]^^3^^, [[Peng Ouyang|AUTHOR Peng Ouyang]]^^3^^, [[Shouyi Yin|AUTHOR Shouyi Yin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^ShanghaiTech, China; ^^3^^Tsing Micro, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1773–1777&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based models have made tremendous progress on end-to-end automatic speech recognition (ASR) recently. However, the conventional transformer-based approaches usually generate the sequence results token by token from left to right, leaving the right-to-left contexts unexploited. In this work, we introduce a bidirectional speech transformer to utilize the different directional contexts simultaneously. Specifically, the outputs of our proposed transformer include a left-to-right target, and a right-to-left target. In inference stage, we use the introduced bidirectional beam search method, which can not only generate left-to-right candidates but also generate right-to-left candidates, and determine the best hypothesis by the score.

To demonstrate our proposed speech transformer with a bidirectional decoder (STBD), we conduct extensive experiments on the AISHELL-1 dataset. The experimental results show that STBD achieves a 3.6% relative CER reduction (CERR) over the unidirectional speech transformer baseline. Moreover, the strongest model in this paper, called STBD-Big, achieves 6.64% CER on the test set, without language model rescoring or any extra data augmentation strategies.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weiran Wang|AUTHOR Weiran Wang]], [[Guangsen Wang|AUTHOR Guangsen Wang]], [[Aadyot Bhatnagar|AUTHOR Aadyot Bhatnagar]], [[Yingbo Zhou|AUTHOR Yingbo Zhou]], [[Caiming Xiong|AUTHOR Caiming Xiong]], [[Richard Socher|AUTHOR Richard Socher]]
</p><p class="cpabstractcardaffiliationlist">Salesforce, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1778–1782&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phones and their context-dependent variants have been the standard modeling units for conventional speech recognition systems, while characters and subwords have demonstrated their effectiveness for end-to-end recognition systems. We investigate the use of phone-based subwords, in particular, byte pair encoder (BPE), as modeling units for end-to-end speech recognition. In addition, we also developed multi-level language model-based decoding algorithms based on a pronunciation dictionary. Besides the use of the lexicon, which is easily available, our system avoids the need of additional expert knowledge or processing steps from conventional systems. Experimental results show that phone-based BPEs tend to yield more accurate recognition systems than the character-based counterpart. In addition, further improvement can be obtained with a novel one-pass joint beam search decoder, which efficiently combines phone- and character-based BPE systems. For Switchboard, our phone-based BPE system achieves 6.8%/14.4% word error rate (WER) on the Switchboard/CallHome portion of the test set while joint decoding achieves 6.3%/13.3% WER. On Fisher + Switchboard, joint decoding leads to 4.9%/9.5% WER, setting new milestones for telephony speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeremy H.M. Wong|AUTHOR Jeremy H.M. Wong]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Rui Zhao|AUTHOR Rui Zhao]], [[Liang Lu|AUTHOR Liang Lu]], [[Eric Sun|AUTHOR Eric Sun]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1783–1787&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies suggest that it may now be possible to construct end-to-end Neural Network (NN) models that perform on-par with, or even outperform, hybrid models in speech recognition. These models differ in their designs, and as such, may exhibit diverse and complementary error patterns. A combination between the predictions of these models may therefore yield significant gains. This paper studies the feasibility of performing hypothesis-level combination between hybrid and end-to-end NN models. The end-to-end NN models often exhibit a bias in their posteriors toward short hypotheses, and this may adversely affect Minimum Bayes’ Risk (MBR) combination methods. MBR training and length normalisation can be used to reduce this bias. Models are trained on Microsoft’s 75 thousand hours of anonymised data and evaluated on test sets with 1.8 million words. The results show that significant gains can be obtained by combining the hypotheses of hybrid and end-to-end NN models together.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jihwan Kim|AUTHOR Jihwan Kim]]^^1^^, [[Jisung Wang|AUTHOR Jisung Wang]]^^2^^, [[Sangki Kim|AUTHOR Sangki Kim]]^^1^^, [[Yeha Lee|AUTHOR Yeha Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^VUNO, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1788–1792&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural architecture search (NAS) has been successfully applied to finding efficient, high-performance deep neural network architectures in a task-adaptive manner without extensive human intervention. This is achieved by choosing genetic, reinforcement learning, or gradient -based algorithms as automative alternatives of manual architecture design. However, a naive application of existing NAS algorithms to different tasks may result in architectures which perform sub-par to those manually designed. In this work, we show that NAS can provide efficient architectures that outperform manually designed attention-based architectures on speech recognition tasks, after which we named Evolved Speech-Transformer (EST). With a combination of carefully designed search space and Progressive dynamic hurdles, a genetic algorithm based, our algorithm finds a memory-efficient architecture which outperforms vanilla Transformer with reduced training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhinav Garg|AUTHOR Abhinav Garg]], [[Ashutosh Gupta|AUTHOR Ashutosh Gupta]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Shatrughan Singh|AUTHOR Shatrughan Singh]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1793–1797&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a hierarchical multi-stage word-to-grapheme Named Entity Correction (NEC) algorithm. Conventional NEC algorithms use a single-stage grapheme or phoneme level edit distance to search and replace Named Entities (NEs) misrecognized by a speech recognizer. However, longer named entities like song titles cannot be easily handled by such a single stage correction. We propose a three-stage NEC, starting with a word-level matching, followed by a phonetic double metaphone based matching, and a final grapheme level candidate selection. We also propose a novel NE Rejection mechanism which is important to ensure that the NEC does not replace correctly recognized NEs with unintended but similar named entities. We evaluate our solution on two different test sets from the //call// and //music// domains, for both server as well as on-device speech recognition configurations. For the on-device model, our NEC outperforms an n-gram fusion when employed standalone. Our NEC reduces the word error rate by 14% and 63% relatively for //music// and //call//, respectively, when used after an n-gram based biasing language model. The average latency of our NEC is under 3 ms per input sentence while using only ~1 MB for an input NE list of 20,000 entries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1798–1802&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural network language models (LMs) based on self-attention have recently outperformed the previous state of the art, LSTM LMs. Transformer LMs today are often used as a postprocessing step in lattice or n-best list rescoring. In this work the main focus is on using them in one-pass recognition. We show that by a simple reduction of redundant computations in batched self-attention we can obtain a 15% reduction in overall RTF on a well-tuned system. We also show that through proper initialization the layer normalization inside the residual blocks can be removed, yielding a further increase in forwarding speed. This is done under the constraint of staying close to state-of-the-art in terms of word-error rate (5.4% on LibriSpeech test-other) and achieving a real-time factor of around 1. Last but not least we also present an approach to speed up classic push-forward rescoring by mixing it with n-best list rescoring to better utilize the inherent parallelizability of Transformer language models, cutting the time needed for rescoring in half.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi-Chen Chen|AUTHOR Yi-Chen Chen]]^^1^^, [[Jui-Yang Hsu|AUTHOR Jui-Yang Hsu]]^^1^^, [[Cheng-Kuang Lee|AUTHOR Cheng-Kuang Lee]]^^2^^, [[Hung-yi Lee|AUTHOR Hung-yi Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University; ^^2^^NVIDIA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1803–1807&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In previous works, only parameter weights of ASR models are optimized under fixed-topology architecture. However, the design of successful model architecture has always relied on human experience and intuition. Besides, many hyperparameters related to model architecture need to be manually tuned. Therefore in this paper, we propose an ASR approach with efficient gradient-based architecture search, DARTS-ASR. In order to examine the generalizability of DARTS-ASR, we apply our approach not only on many languages to perform monolingual ASR, but also on a multilingual ASR setting. Following previous works, we conducted experiments on a multilingual dataset, IARPA BABEL. The experiment results show that our approach outperformed the baseline fixed-topology architecture by 10.2% and 10.0% relative reduction on character error rates under monolingual and multilingual ASR settings respectively. Furthermore, we perform some analysis on the searched architectures by DARTS-ASR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lukas Stappen|AUTHOR Lukas Stappen]]^^1^^, [[Georgios Rizos|AUTHOR Georgios Rizos]]^^2^^, [[Madina Hasan|AUTHOR Madina Hasan]]^^3^^, [[Thomas Hain|AUTHOR Thomas Hain]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^Imperial College London, UK; ^^3^^University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1808–1812&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The evaluation of scientific submissions through peer review is both the most fundamental component of the publication process, as well as the most frequently criticised and questioned. Academic journals and conferences request reviews from multiple reviewers per submission, which an editor, or area chair aggregates into the final acceptance decision. Reviewers are often in disagreement due to varying levels of domain expertise, confidence, levels of motivation, as well as due to the heavy workload and the different interpretations by the reviewers of the score scale. Herein, we explore the possibility of a computational decision support tool for the editor, based on Natural Language Processing, that offers an additional aggregated recommendation. We provide a comparative study of state-of-the-art text modelling methods on the newly crafted, largest review dataset of its kind based on Interspeech 2019, and we are the first to explore uncertainty-aware methods (soft labels, quantile regression) to address the subjectivity inherent in this problem.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sadari Jayawardena|AUTHOR Sadari Jayawardena]], [[Julien Epps|AUTHOR Julien Epps]], [[Zhaocheng Huang|AUTHOR Zhaocheng Huang]]
</p><p class="cpabstractcardaffiliationlist">UNSW Sydney, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1853–1857&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many affective computing datasets are annotated using ordinal scales, as are many other forms of ground truth involving subjectivity, e.g. depression severity. When investigating these datasets, the speech processing community has chosen classification problems in some cases, and regression in others, while ordinal regression may also arguably be the correct approach for some. However, there is currently essentially no guidance on selecting a suitable machine learning and evaluation method. To investigate this problem, this paper proposes a neural network-based framework which can transition between different modelling methods with the help of a novel multi-term loss function. Experiments on synthetic datasets show that the proposed framework is empirically well-behaved and able to correctly identify classification-like, ordinal regression-like and regression-like properties within multidimensional datasets. Application of the proposed framework to six real datasets widely used in affective computing and related fields suggests that more focus should be placed on ordinal regression instead of classifying or predicting, which are the common practices to date.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Melina Sarian|AUTHOR Melina Sarian]], [[Kristin Predeck|AUTHOR Kristin Predeck]], [[Georgia Zellou|AUTHOR Georgia Zellou]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1813–1817&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>More and more, humans are engaging with voice-activated artificially intelligent (voice-AI) systems that have names (e.g., Alexa), apparent genders, and even emotional expression; they are in many ways a growing ‘social’[ presence. But to what extent do people display sociolinguistic attitudes, developed from human-human interaction, toward these disembodied text-to-speech (TTS) voices? And how might they vary based on the cognitive traits of the individual user? The current study addresses these questions, testing native English speakers’ judgments for 6 traits (intelligent, likeable, attractive, professional, human-like, and age) for a naturally-produced female human voice and the US-English default Amazon Alexa voice. Following exposure to the voices, participants completed these ratings for each speaker, as well as the Autism Quotient (AQ) survey, to assess individual differences in cognitive processing style. Results show differences in individuals’ ratings of the likeability and human-likeness of the human and AI talkers based on AQ score. Results suggest that humans transfer social assessment of human voices to voice-AI, but that the way they do so is mediated by their own cognitive characteristics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michelle Cohn|AUTHOR Michelle Cohn]]^^1^^, [[Eran Raveh|AUTHOR Eran Raveh]]^^2^^, [[Kristin Predeck|AUTHOR Kristin Predeck]]^^1^^, [[Iona Gessinger|AUTHOR Iona Gessinger]]^^2^^, [[Bernd Möbius|AUTHOR Bernd Möbius]]^^2^^, [[Georgia Zellou|AUTHOR Georgia Zellou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at Davis, USA; ^^2^^Universität des Saarlandes, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1818–1822&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study compares how individuals perceive gradient acoustic realizations of emotion produced by a human voice versus an Amazon Alexa text-to-speech (TTS) voice. We manipulated semantically neutral sentences spoken by both talkers with identical emotional synthesis methods, using three levels of increasing ‘happiness’ (0%, 33%, 66% ‘happier’). On each trial, listeners (native speakers of American English, n=99) rated a given sentence on two scales to assess dimensions of emotion: valence (negative-positive) and arousal (calm-excited). Participants also rated the Alexa voice on several parameters to assess anthropomorphism (e.g., naturalness, human-likeness, etc.). Results showed that the emotion manipulations led to increases in perceived positive valence and excitement. Yet, the effect differed by interlocutor: increasing ‘happiness’ manipulations led to larger changes for the human voice than the Alexa voice. Additionally, we observed individual differences in perceived valence/arousal based on participants’ anthropomorphism scores. Overall, this line of research can speak to theories of computer personification and elucidate our changing relationship with voice-AI technology.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luz Martinez-Lucas|AUTHOR Luz Martinez-Lucas]], [[Mohammed Abdelwahab|AUTHOR Mohammed Abdelwahab]], [[Carlos Busso|AUTHOR Carlos Busso]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1823–1827&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human-computer interactions can be very effective, especially if computers can automatically recognize the emotional state of the user. A key barrier for effective speech emotion recognition systems is the lack of large corpora annotated with emotional labels that reflect the temporal complexity of expressive behaviors, especially during multiparty interactions. This paper introduces the MSP-Conversation corpus, which contains interactions annotated with time-continuous emotional traces for arousal (calm to active), valence (negative to positive), and dominance (weak to strong). Time-continuous annotations offer the flexibility to explore emotional displays at different temporal resolutions while leveraging contextual information. This is an ongoing effort, where the corpus currently contains more than 15 hours of speech annotated by at least five annotators. The data is sourced from the MSP-Podcast corpus, which contains speech data from online audio-sharing websites annotated with sentence-level emotional scores. This data collection scheme is an easy, affordable, and scalable approach to obtain natural data with diverse emotional content from multiple speakers. This study describes the key features of the corpus. It also compares the time-continuous evaluations from the MSP-Conversation corpus with the sentence-level annotations of the MSP-Podcast corpus for the speech segments that overlap between the two corpora.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fuxiang Tao|AUTHOR Fuxiang Tao]]^^1^^, [[Anna Esposito|AUTHOR Anna Esposito]]^^2^^, [[Alessandro Vinciarelli|AUTHOR Alessandro Vinciarelli]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Glasgow, UK; ^^2^^Università della Campania “Luigi Vanvitelli”, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1828–1832&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work investigates the use of a classification approach as a means to identify effective depression markers in read speech, i.e., observable and measurable traces of the pathology in the way people read a predefined text. This is important because the diagnosis of depression is still a challenging problem and reliable markers can, at least to a partial extent, contribute to address it. The experiments have involved 110 individuals and revolve around the tendency of depressed people to read slower and display silences that are both longer and more frequent. The results show that features expected to capture such differences reduce the error rate of a baseline classifier by more than 50% (from 31.8% to 15.5%). This is of particular interest when considering that the new features are less than 10% of the original set (3 out of 32). Furthermore, the results appear to be in line with the findings of neuroscience about brain-level differences between depressed and non-depressed individuals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yelin Kim|AUTHOR Yelin Kim]], [[Joshua Levy|AUTHOR Joshua Levy]], [[Yang Liu|AUTHOR Yang Liu]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1833–1837&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For an interactive agent, such as task-oriented spoken dialog systems or chatbots, measuring and adapting to Customer Satisfaction (CSAT) is critical in order to understand user perception of an agent’s behavior and increase user engagement and retention. However, an agent often relies on explicit customer feedback for measuring CSAT. Such explicit feedback may result in potential distraction to users and it can be challenging to capture continuously changing user’s satisfaction. To address this challenge, we present a new approach to automatically estimate CSAT using acoustic and lexical information in the Alexa Prize Socialbot data. We first explore the relationship between CSAT and sentiment scores at both the utterance and conversation level. We then investigate static and temporal modeling methods that use estimated sentiment scores as a mid-level representation. The results show that the sentiment scores, particularly valence and satisfaction, are correlated with CSAT. We also demonstrate that our proposed temporal modeling approach for estimating CSAT achieves competitive performance, relative to static baselines as well as human performance. This work provides insights into open domain social conversations between real users and socialbots, and the use of both acoustic and lexical information for understanding the relationship between CSAT and sentiment scores.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haley Lepp|AUTHOR Haley Lepp]]^^1^^, [[Gina-Anne Levow|AUTHOR Gina-Anne Levow]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Educational Testing Service, USA; ^^2^^University of Washington, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1838–1842&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study presents a corpus of turn changes between speakers in U.S. Supreme Court oral arguments. Each turn change is labeled on a spectrum of “cooperative” to “competitive” by a human annotator with legal experience in the United States. We analyze the relationship between speech features, the nature of exchanges, and the gender and legal role of the speakers. Finally, we demonstrate that the models can be used to predict the label of an exchange with moderate success. The automatic classification of the nature of exchanges indicates that future studies of turn-taking in oral arguments can rely on larger, unlabeled corpora.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jana Neitsch|AUTHOR Jana Neitsch]], [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]
</p><p class="cpabstractcardaffiliationlist">University of Southern Denmark, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1843–1847&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Hate speech, both written and spoken, is a growing source of concern as it often discriminates societal minorities for their national origin, sexual orientation, gender or disabilities. Despite its destructive power, hardly anything is known about whether there are cross-linguistic mechanisms and acoustic-phonetic characteristics of hate speech. For this reason, our experiment analyzes the implicit prosodies that are caused by written Twitter and Facebook hate-speech items and made phonetically “tangible” through a special, introspective reading-aloud task. We compare the elicited (implicit) prosodies of Danish and German speakers with respect to f0, intensity, HNR, and the Hammarberg index. While we found no evidence for a consistent hate-speech-specific prosody either within or between the two languages, our results show clear prosodic differences associated with types of hate speech and their targeted minority groups. Moreover, language-specific differences suggest that — compared to Danish — German hate speech sounds more expressive and hateful. Results are discussed regarding their implications for the perceived severity and the automatic flagging and deletion of hate-speech posts in social media.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fuling Chen|AUTHOR Fuling Chen]], [[Roberto Togneri|AUTHOR Roberto Togneri]], [[Murray Maybery|AUTHOR Murray Maybery]], [[Diana Tan|AUTHOR Diana Tan]]
</p><p class="cpabstractcardaffiliationlist">University of Western Australia, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1848–1852&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human voices vary in their perceived masculinity or femininity, and subjective gender scores provided by human raters have long been used in psychological studies to understand the complex psychosocial relationships between people. However, there has been limited research on developing objective gender scoring of voices and examining the correlation between objective gender scores (including the weighting of each acoustic factor) and subjective gender scores (i.e., perceived masculinity/ femininity). In this work we propose a gender scoring model based on Linear Discriminant Analysis (LDA) and using weakly labelled data to objectively rate speakers’ masculinity and femininity. For 434 speakers, we investigated 29 acoustic measures of voice characteristics and their relationships to both the objective scores and subjective masculinity/femininity ratings. The results revealed close correspondence between objective scores and subjective ratings of masculinity for males and femininity for females (correlations of 0.667 and 0.505 respectively). Among the 29 measures, F0 was found to be the most important vocal characteristic influencing both objective and subjective ratings for both sexes. For female voices, local absolute jitter and Harmonic-to-Noise Ratio (HNR) were moderately associated with objective scores. For male voices, F0 variance influenced objective gender scores more than the subjective ratings provided by human listeners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[N. Tomashenko|AUTHOR N. Tomashenko]]^^1^^, [[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]]^^2^^, [[Xin Wang|AUTHOR Xin Wang]]^^3^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^4^^, [[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^5^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^3^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^5^^, [[Jose Patino|AUTHOR Jose Patino]]^^5^^, [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]^^1^^, [[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]]^^1^^, [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^Inria, France; ^^3^^NII, Japan; ^^4^^Loria (UMR 7503), France; ^^5^^EURECOM, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1693–1697&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The VoicePrivacy initiative aims to promote the development of privacy preservation tools for speech technology by gathering a new community to define the tasks of interest and the evaluation methodology, and benchmarking solutions through a series of challenges. In this paper, we formulate the voice anonymization task selected for the VoicePrivacy 2020 Challenge and describe the datasets used for system development and evaluation. We also present the attack models and the associated objective and subjective evaluation metrics. We introduce two anonymization baselines and report objective evaluation results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^1^^, [[Jose Patino|AUTHOR Jose Patino]]^^1^^, [[N. Tomashenko|AUTHOR N. Tomashenko]]^^2^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^3^^, [[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]]^^2^^, [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]^^2^^, [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^1^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EURECOM, France; ^^2^^LIA (EA 4128), France; ^^3^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1698–1702&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Mounting privacy legislation calls for the preservation of privacy in speech technology, though solutions are gravely lacking. While evaluation campaigns are long-proven tools to drive progress, the need to consider a privacy //adversary// implies that traditional approaches to evaluation must be adapted to the assessment of privacy and privacy preservation solutions. This paper presents the first step in this direction: //metrics//.

We introduce the zero evidence biometric recognition assessment (ZEBRA) framework and propose two new privacy metrics. They measure the //average// level of privacy preservation afforded by a given safeguard for a population and the //worst-case// privacy disclosure for an individual. The paper demonstrates their application to privacy preservation assessment within the scope of the VoicePrivacy challenge. While the ZEBRA framework is designed with speech applications in mind, it is a candidate for incorporation into biometric information protection standards and is readily extendable to the study of privacy in applications even beyond speech and biometrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Candy Olivia Mawalim|AUTHOR Candy Olivia Mawalim]]^^1^^, [[Kasorn Galajit|AUTHOR Kasorn Galajit]]^^1^^, [[Jessada Karnjana|AUTHOR Jessada Karnjana]]^^2^^, [[Masashi Unoki|AUTHOR Masashi Unoki]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JAIST, Japan; ^^2^^NECTEC, Thailand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1703–1707&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Anonymizing speaker individuality is crucial for ensuring voice privacy protection. In this paper, we propose a speaker individuality anonymization system that uses singular value modification and statistical-based decomposition on an x-vector with ensemble regression modeling. An anonymization system requires speaker-to-speaker correspondence (each speaker corresponds to a pseudo-speaker), which may be possible by modifying significant x-vector elements. The significant elements were determined by singular value decomposition and variant analysis. Subsequently, the anonymization process was performed by an ensemble regression model trained using x-vector pools with clustering-based pseudo-targets. The results demonstrated that our proposed anonymization system effectively improves objective verifiability, especially in anonymized trials and anonymized enrollments setting, by preserving similar intelligibility scores with the baseline system introduced in the VoicePrivacy 2020 Challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohamed Maouche|AUTHOR Mohamed Maouche]]^^1^^, [[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]]^^1^^, [[Nathalie Vauquier|AUTHOR Nathalie Vauquier]]^^1^^, [[Aurélien Bellet|AUTHOR Aurélien Bellet]]^^1^^, [[Marc Tommasi|AUTHOR Marc Tommasi]]^^2^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inria, France; ^^2^^Université de Lille, France; ^^3^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1708–1712&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech anonymization techniques have recently been proposed for preserving speakers’ privacy. They aim at concealing speakers’ identities while preserving the spoken content. In this study, we compare three metrics proposed in the literature to assess the level of privacy achieved. We exhibit through simulation the differences and blindspots of some metrics. In addition, we conduct experiments on real data and state-of-the-art anonymization techniques to study how they behave in a practical scenario. We show that the application-independent log-likelihood-ratio cost function C^^min^^,,llr,, provides a more robust evaluation of privacy than the equal error rate (EER), and that detection-based metrics provide different information from linkability metrics. Interestingly, the results on real data indicate that current anonymization design choices do not induce a regime where the differences between those metrics become apparent.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]]^^1^^, [[N. Tomashenko|AUTHOR N. Tomashenko]]^^2^^, [[Xin Wang|AUTHOR Xin Wang]]^^3^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^4^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^3^^, [[Mohamed Maouche|AUTHOR Mohamed Maouche]]^^1^^, [[Aurélien Bellet|AUTHOR Aurélien Bellet]]^^1^^, [[Marc Tommasi|AUTHOR Marc Tommasi]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inria, France; ^^2^^LIA (EA 4128), France; ^^3^^NII, Japan; ^^4^^Loria (UMR 7503), France; ^^5^^Université de Lille, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1713–1717&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The recently proposed x-vector based anonymization scheme converts any input voice into that of a random //pseudo-speaker//. In this paper, we present a flexible pseudo-speaker selection technique as a baseline for the first VoicePrivacy Challenge. We explore several design choices for the distance metric between speakers, the region of x-vector space where the pseudo-speaker is picked, and gender selection. To assess the strength of anonymization achieved, we consider attackers using an x-vector based speaker verification system who may use original or anonymized speech for enrollment, depending on their knowledge of the anonymization scheme. The Equal Error Rate (EER) achieved by the attackers and the decoding Word Error Rate (WER) over anonymized data are reported as the measures of privacy and utility. Experiments are performed using datasets derived from LibriSpeech to find the optimal combination of design choices in terms of privacy and utility.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]]^^1^^, [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]^^1^^, [[Driss Matrouf|AUTHOR Driss Matrouf]]^^1^^, [[N. Tomashenko|AUTHOR N. Tomashenko]]^^1^^, [[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^2^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^EURECOM, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1718–1722&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The proliferation of speech technologies and rising privacy legislation calls for the development of privacy preservation solutions for speech applications. These are essential since speech signals convey a wealth of rich, personal and potentially sensitive information. Anonymisation, the focus of the recent VoicePrivacy initiative, is one strategy to protect speaker identity information. Pseudonymisation solutions aim not only to mask the speaker identity and preserve the linguistic content, quality and naturalness, as is the goal of anonymisation, but also to preserve voice distinctiveness. Existing metrics for the assessment of anonymisation are ill-suited and those for the assessment of pseudonymisation are completely lacking. Based upon voice similarity matrices, this paper proposes the first intuitive visualisation of pseudonymisation performance for speech signals and two novel metrics for objective assessment. They reflect the two, key pseudonymisation requirements of de-identification and voice distinctiveness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wendy Lalhminghlui|AUTHOR Wendy Lalhminghlui]], [[Priyankoo Sarmah|AUTHOR Priyankoo Sarmah]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1903–1907&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Since the production of fundamental frequency and voicing is determined by the tension in the vocal folds, it is noticed that VOT is affected by the F0 in tone languages. Similarly laryngeal contrasts also affect the F0 of tone. This work studies the interaction between tone and voicing in a lesser-known tone language, Mizo. Mizo has eight stops, that can be categorized into three laryngeal contrasts namely, voiced, voiceless unaspirated, and voiceless aspirated. In the current work, we look into CV syllables produced with the eight Mizo stops with all five vowel categories of the language, produced with four distinct tones in Mizo. The results show a predictable effect of onsets on the F0 of tone and weak effect of tone on VOT duration.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yaru Wu|AUTHOR Yaru Wu]]^^1^^, [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]]^^1^^, [[Lori Lamel|AUTHOR Lori Lamel]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LPP (UMR 7018), France; ^^2^^LIMSI (UPR 3251), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1908–1912&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study aims to increase our knowledge of Mandarin lexical tones in fluent speech, more specifically their occurrence frequency distributions and their duration patterns. First, the occurrence frequency of each lexical tone was computed in a large speech corpus (~220 hours). Then the duration of each lexical tone, as well as the impact of word length, syllable position and the prosodic position were investigated. Overall, results show that Tone 3 tends to have the longest duration among all lexical tones. Nonetheless, the factors word length, syllable position and prosodic position are found to impact tone duration. Monosyllabic words exhibit tone durations closer to those of word-final syllables (especially for disyllabic words) than to other syllable positions. Moreover, tone duration tends to be the longest at word’s right boundary in Mandarin, regardless of word length. An effect of prosodic position is also found: the duration of Mandarin lexical tones tends to increase with higher prosodic level. Tone durations are the longest in phrase-final position, followed by word-final position and word-medial position, regardless of the tone nature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingming Gao|AUTHOR Yingming Gao]]^^1^^, [[Xinyu Zhang|AUTHOR Xinyu Zhang]]^^1^^, [[Yi Xu|AUTHOR Yi Xu]]^^2^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^3^^, [[Peter Birkholz|AUTHOR Peter Birkholz]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Dresden, Germany; ^^2^^University College London, UK; ^^3^^BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1913–1917&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The complex f,,0,, variations in continuous speech make it rather difficult to perform automatic recognition of tones in a language like Mandarin Chinese. In this study, we tested the use of target approximation model (TAM) for continuous tone recognition on two datasets. TAM simulates f,,0,, production from the articulatory point of view and so allow to discover the underlying pitch targets from the surface f,,0,, contour. The f,,0,, contour of each tone represented by 30 equidistant points in the first dataset was simulated by the TAM model. Using a support vector machine (SVM) to classify tones showed that, compared to the representation by 30 f,,0,, values, the estimated three-dimensional TAM parameters had a comparable performance in characterizing tone patterns. The TAM model was further tested on the second dataset containing more complex tonal variations. With equal or a fewer number of features, the TAM parameters provided better performance than the coefficients of the cosine transform and a slightly worse performance than the statistical f,,0,, parameters for tone recognition. Furthermore, we investigated bidirectional LSTM neural network for modelling the sequential tonal variations, which proved to be more powerful than the SVM classifier. The BLSTM system incorporating TAM and statistical f,,0,, parameters achieved the best accuracy of 87.56%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Lai|AUTHOR Wei Lai]], [[Aini Li|AUTHOR Aini Li]]
</p><p class="cpabstractcardaffiliationlist">University of Pennsylvania, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1918–1922&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Chinese third tone sandhi (T3S) covaries with the prosodic hierarchy both in the probability of application and in the realization of pitch slope. This paper evaluates whether Mandarin-speaking listeners integrate the covariation between T3S and prosody to resolute sentence ambiguity. Twenty-seven structurally ambiguous sentences were designed, each containing two consecutive T3 syllables situated across a word boundary, and the strength of the T3-intervening boundary crucially differentiates different interpretations of the sentence. The first T3 was manipulated to bear either a low, a shallow-rising, or a sharp-rising pitch. Sixty native Mandarin-speaking listeners heard each of these sentences and chose from two written interpretations the one that was consistent with what they heard. The results show that listeners are more likely to report a major-juncture interpretation when T3S does not apply (low) than when it applies (rising), and in the latter case, when the T3S variant has a sharper rather than shallower slope. Post-hoc analyses show that the T3S application is a more robust parsing cue for short sentences (4–5 syllables long), whereas the pitch shape of T3S is a more efficient parsing cue for longer sentences, indicating that listeners make sophisticated use of tonal variation to facilitate sentence processing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenrui Zhang|AUTHOR Zhenrui Zhang]]^^1^^, [[Fang Hu|AUTHOR Fang Hu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UCASS, China; ^^2^^CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1923–1927&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper discusses the phonetics and phonology of tones in Changde Mandarin and focuses on neutral tone in disyllabic words. Acoustic realizations of both citation and neutral tones are examined in terms of fundamental frequencies (F,,0,,), duration, and intensity. And phonetic and phonological descriptions are given on the basis of acoustic data. Acoustic data from 12 speakers show that Changde Mandarin has four lexical tones that distinguish in contour, namely level versus rising, and pitch height, namely high versus low. Neutral tone in Changde Mandarin is different from that in Beijing Mandarin or Standard Chinese. Neutral tone in Changde Mandarin is a reduced form of its citation tone, which is produced with a neutralized pitch height, and a significantly shorter duration and weaker intensity than the control.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ping Cui|AUTHOR Ping Cui]]^^1^^, [[Jianjing Kuang|AUTHOR Jianjing Kuang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^University of Pennsylvania, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1928–1932&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Northeastern Mandarin has a similar lexical tone system as Beijing Mandarin. However, the two dialects significantly diverge at higher prosodic structures. T1 in Northeastern Mandarin always changes to a falling tone in domain-final positions. Previous studies have analyzed this variation as a type of tone sandhi, but we propose it is related to more global prosodic processes such as final lowering. We addressed this issue by conducting both production and perception experiments with native bidialectal speakers of Northeastern Mandarin and Beijing Mandarin. Our findings suggest that T1 variation is essentially a domain-final lowering effect. Other tones also show some kind of final lowering effects. Compared to Beijing Mandarin, Northeastern Mandarin generally has greater global pitch declination and greater final lowering effects. Our perception experiment further showed that both prosodic effects play important roles in identifying the Northeastern Mandarin accent, and final lowering cues are more perceptually salient than the global declination cues. These findings support the notion that pitch declination and final lowering effects are linguistically controlled, not just a by-product of the physiological mechanisms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Phil Rose|AUTHOR Phil Rose]]
</p><p class="cpabstractcardaffiliationlist">Australian National University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1933–1937&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To provide reference data for studies of voice quality variation in lexical tone, an experiment is described to investigate the nature of intrinsic variation in spectral slope and interharmonic noise for Cantonese citation tones. 23 spectral slope and interharmonic noise measures are extracted with //VoiceSauce// from the tones on /o/ Rhymes of five male and five female speakers of conservative Cantonese. Significant correlation between F0 and both spectral slope and interharmonic noise is demonstrated. It is shown with probabilistic bivariate discriminant analysis that even tones with no extrinsic voice quality differences can be identified at rates considerably above chance from a combination of their spectral slope and interharmonic noise. Male tones, with a minimal error rate of 5.7%, are identified twice as well as female, with a minimal error rate of 14.5%. Combinations with uncorrected spectral slopes perform better than corrected. The best combinations for both sexes involve slope parameters //H2H4// (difference between the 4^^th^^ and 2^^nd^^ harmonic amplitudes); and //H42K// (difference between the 4^^th^^ harmonic and nearest harmonic to 2 kHz), irrespective of noise parameters. The worst combinations involve //CPP// (cepstral peak prominence) as a noise parameter.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ping Tang|AUTHOR Ping Tang]], [[Shanpeng Li|AUTHOR Shanpeng Li]]
</p><p class="cpabstractcardaffiliationlist">NJUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1938–1941&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many studies have demonstrated that acoustic contrasts between speech segments (vowels and consonants) were reduced when speaking rate increases, while it was unclear whether tones in tonal languages also undergo similar modifications. Mandarin Chinese is a tonal language, while results regarding the rate effect on Mandarin tones in previous studies were mixed, probably driven by the material difference, i.e., the position of target tones within a sentence. Therefore, the present study examined the effect of speaking rate on Mandarin tones, comparing the pitch contour and tonal contrast of Mandarin tones between normal and fast speech across utterance initial, medial and final positions. The results showed that, relative to normal speech, lexical tones in Mandarin Chinese exhibited overall higher and flatter pitch contours, with smaller tonal space. Moreover, the rate effect on tones did not vary with position. The current results and previous studies on segments thus revealed a universal pattern of speech reduction in fast speech at both segmental and suprasegmental levels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rubén Pérez-Ramón|AUTHOR Rubén Pérez-Ramón]]^^1^^, [[María Luisa García Lecumberri|AUTHOR María Luisa García Lecumberri]]^^1^^, [[Martin Cooke|AUTHOR Martin Cooke]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad del País Vasco, Spain; ^^2^^Ikerbasque, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2362–2366&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Foreign accent has different effects on speech intelligibility for native and non-native listeners. However, not much is known about the impact of individual foreign-accented segments on listeners with different levels of proficiency in the language. Using a technique developed to generate degrees of segmental foreign accent, this study investigates how native and non-native listeners differing in language proficiency categorise and discriminate degrees of accentedness at the segmental level. Listeners responded to continua ranging from Spanish-accented tokens to English tokens, constructed by inserting accented segments into words. Six continua were chosen, based on known problems faced by Spanish speakers of English. Whether foreign accent categorisation performance differed across native and non-native listeners was found to depend on the status of the segment in the listeners’ first language. For certain sounds both high and low proficiency non-native groups resembled native listener responses. For other sounds, categorisation revealed a clear effect of proficiency, with the high-proficiency group closer to native performance than the low proficiency cohort. This behaviour indicates an ongoing process of new second language phonemic category creation by the more proficient learners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nari Rhee|AUTHOR Nari Rhee]], [[Jianjing Kuang|AUTHOR Jianjing Kuang]]
</p><p class="cpabstractcardaffiliationlist">University of Pennsylvania, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2407–2411&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Any phonological contrast is distinguished by multiple covarying cues. The role and nature of the cue covariation have been much debated in the literature, identifying the physiological link in production, perceptual integration, and enhancement as the key factors in play. In this study, we test whether the enhancement role of covarying cues are influenced by yet another factor: the interaction between cues at multiple layers of phonological structure. We hypothesize that further enhancement of the cue covariations occurs in contexts where acoustic cues are in competition for phonological contrasts at multiple prosodic levels. To test this, we investigate the enhancement role of the covariation relationship between F0 and spectral cues in Mandarin and Thai tones in different phrasal positions. Exploratory and multidimensional-scaling analyses suggest that in Mandarin, the phrase-final weakening of F0 cues are compensated by enhancing the spectral cues, while in Thai, tonal category-specific enhancement is observed at all phrasal positions. The context- and category-specific enhancement of covarying cues suggests that the language’s phonological structure plays an important role in the fine-tuning of cue covariations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Liu|AUTHOR Yi Liu]], [[Jinghong Ning|AUTHOR Jinghong Ning]]
</p><p class="cpabstractcardaffiliationlist">PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2367–2371&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To perceive a second language (L2), non-native speakers not only have to focus on phonological, lexical and grammatical knowledge, but also need to develop a good mastery of L2 strategic knowledge, including selective attention and language planning. Previous research has found that non-tonal speakers are overtly attentive to segments, while tonal language speakers give more attention to tones. However, it is unclear how different dominant language speakers distribute their attention when processing both segments and tones in non-native speeches. In the current study Cantonese native speakers, Cantonese-dominants, and Urdu-dominants participated in an attention distribution experiment in Cantonese. The results show that the Urdu-dominants retain their L1 attentional strategy in the processing of Cantonese stimuli, classifying the stimuli along segments, while the Cantonese native speakers are more attentive to tones. Moreover, the Cantonese-dominants perform either in monolingual mode or bilingual mode according to different tasks, showing a perceptual flexibility in highly proficient and experienced listeners. The results reveal that language dominance plays a vital role in listeners’ attention distribution. The research also supports the ASP model and hypothesis on bilinguals, proposed by [1].</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mengrou Li|AUTHOR Mengrou Li]], [[Ying Chen|AUTHOR Ying Chen]], [[Jie Cui|AUTHOR Jie Cui]]
</p><p class="cpabstractcardaffiliationlist">NJUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2372–2376&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In both American and British English, tense high vowels /i/ and /u/ show extreme positions of the tongue and lips in articulation rather than their lax counterparts /ɪ/ and /ʊ/. However, the tenseness contrast in English is taught to Chinese learners in classroom by most instructors as duration difference — /i/ and /u/ are longer than /ɪ/ and /ʊ/ respectively. The present study therefore examines English production of /i/ vs. /ɪ/ and /u/ vs. /ʊ/ by Chinese elementary students and investigates how L2 beginners actually realize the target vowels and how their production resembles that of their classroom instructors and talkers who recorded their teaching materials. The results show that the students differentiated /i/ from /ɪ/ and /u/ from /ʊ/ mainly in duration and marginally in F2 but not in F1. Their production was found closer to their English teacher’s than the textbook recordings’ and native English speakers’, suggesting the input from the teachers significantly affects the English production of elementary school students in China.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laura Spinu|AUTHOR Laura Spinu]]^^1^^, [[Jiwon Hwang|AUTHOR Jiwon Hwang]]^^2^^, [[Nadya Pincus|AUTHOR Nadya Pincus]]^^3^^, [[Mariana Vasilita|AUTHOR Mariana Vasilita]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUNY KCC, USA; ^^2^^Stony Brook University, USA; ^^3^^University of Delaware, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2377–2381&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We designed a production experiment to explore the relatively controversial phenomenon of the bilingual advantage. Our focus is on an understudied aspect of bilingual cognition, specifically phonetic learning. We presented 36 participants (17 monolinguals and 19 early bilinguals) living in New York City with an artificially constructed accent of English, differing in four ways from Standard American English. More precisely, the novel accent included a vocalic change (diphthongization of the open-mid front unrounded vowel), consonantal change (tapping of intervocalic liquids), syllable structure change (epenthesis in voiceless s-clusters) and suprasegmental change (a novel intonation pattern in tag questions). After recording their baseline accents, the participants first completed a training task, in which they listened to and then directly imitated sentences heard in the novel accent, and then a testing task, in which they were asked to read the baseline sentences in the accent they had just learned in the absence of any audio prompts. In this paper, we present acoustic results with diphthongization and tag question intonation. Our findings replicate the previously observed bilingual advantage in phonetic learning across the board and extend it to novel learning circumstances. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shammur A. Chowdhury|AUTHOR Shammur A. Chowdhury]]^^1^^, [[Younes Samih|AUTHOR Younes Samih]]^^1^^, [[Mohamed Eldesouki|AUTHOR Mohamed Eldesouki]]^^2^^, [[Ahmed Ali|AUTHOR Ahmed Ali]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HBKU, Qatar; ^^2^^Concordia University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2382–2386&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The intra-utterance code-switching (CS) is defined as the alternation between two or more languages within the same utterance. Despite the fact that spoken dialectal code-switching (DCS) is more challenging than CS, it remains largely unexplored. In this study, we describe a method to build the first spoken DCS corpus. The corpus is annotated at the token-level minding both linguistic and acoustic cues for dialectal Arabic. For detailed analysis, we study Arabic automatic speech recognition (ASR), Arabic dialect identification (ADI), and natural language processing (NLP) modules for the DCS corpus. Our results highlight the importance of lexical information for discriminating the DCS labels. We observe that the performance of different models is highly dependent on the degree of code-mixing at the token-level as well as its complexity at the utterance-level.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Khia A. Johnson|AUTHOR Khia A. Johnson]], [[Molly Babel|AUTHOR Molly Babel]], [[Robert A. Fuhrman|AUTHOR Robert A. Fuhrman]]
</p><p class="cpabstractcardaffiliationlist">University of British Columbia, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2387–2391&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When a bilingual switches languages, do they switch their “voice”? Using a new conversational corpus of speech from early Cantonese-English bilinguals (N = 34), this paper examines the talker-specific acoustic signature of bilingual voices. Following prior work in voice quality variation, 24 filter and source-based acoustic measurements are estimated. The analysis summarizes mean differences for these dimensions, in addition to identifying the underlying structure of each talker’s voice across languages with principal components analyses. Canonical redundancy analyses demonstrate that while talkers vary in the degree to which they have the same “voice” across languages, all talkers show strong similarity with themselves.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haobo Zhang|AUTHOR Haobo Zhang]]^^1^^, [[Haihua Xu|AUTHOR Haihua Xu]]^^2^^, [[Van Tung Pham|AUTHOR Van Tung Pham]]^^2^^, [[Hao Huang|AUTHOR Hao Huang]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Xinjiang University, China; ^^2^^Temasek Laboratories @ NTU, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2392–2396&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we conduct data selection analysis in building an English-Mandarin code-switching (CS) speech recognition (CSSR) system, which is aimed for a real CSSR contest in China. The overall training sets have three subsets, i.e., a code-switching data set, an English (LibriSpeech) and a Mandarin data set respectively. The code-switching data are Mandarin dominated. First of all, it is found using the overall data yields worse results, and hence data selection study is necessary. Then to exploit monolingual data, we find data matching is crucial. Mandarin data is closely matched with the Mandarin part in the code-switching data, while English data is not. However, Mandarin data only helps on those utterances that are significantly Mandarin-dominated. Besides, there is a balance point, over which more monolingual data will divert the CSSR system, degrading results. Finally, we analyze the effectiveness of combining monolingual data to train a CSSR system with the HMM-DNN hybrid framework. The CSSR system can perform within-utterance code-switch recognition, but it still has a margin with the one trained on code-switching data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dan Du|AUTHOR Dan Du]]^^1^^, [[Xianjin Zhu|AUTHOR Xianjin Zhu]]^^2^^, [[Zhu Li|AUTHOR Zhu Li]]^^1^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BLCU, China; ^^2^^Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2397–2401&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the area of second language (L2) acquisition, studies of stop consonants have focused on those first language (L1) and L2 with a two-way stop contrast and a three-way stop contrast, few are about languages with a four-way stop contrast. The current study, mainly concerning VOT, investigates how native speakers of a language with a four-way stop contrast acquire the two-way stop contrast in L2. Mandarin presents a two-way stop contrast, which is primarily differentiated by VOT, whereas Urdu presents a four-way stop contrast. Speech perception and production experiments are designed to explore L2 learners with a more complex language system learning a relatively simple language system, and the results show that the speech perception of Mandarin initial stops by native Urdu speakers has no significant difference compared with those by Chinese speakers while the speech production by native Urdu speakers is significantly different from those by Chinese speakers. It demonstrates that L1 exerts different influences on L2 perception and production separately and sometimes good L2 perception doesn’t mean good L2 production. This study has made some contribution about the four-way stop contrast in L2 acquisition and will shed light on L2 learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]], [[Joon Son Chung|AUTHOR Joon Son Chung]], [[Andrew Zisserman|AUTHOR Andrew Zisserman]]
</p><p class="cpabstractcardaffiliationlist">University of Oxford, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2402–2406&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The goal of this work is to train models that can identify a spoken language just by interpreting the speaker’s lip movements. Our contributions are the following: (i) we show that models can learn to discriminate among 14 different languages using only visual speech information; (ii) we compare different designs in sequence modelling and utterance-level aggregation in order to determine the best architecture for this task; (iii) we investigate the factors that contribute discriminative cues and show that our model indeed solves the problem by finding temporal patterns in mouth movements and not by exploiting spurious correlations. We demonstrate this further by evaluating our models on challenging examples from bilingual speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Shi|AUTHOR Hao Shi]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Sheng Li|AUTHOR Sheng Li]]^^2^^, [[Chenchen Ding|AUTHOR Chenchen Ding]]^^2^^, [[Meng Ge|AUTHOR Meng Ge]]^^1^^, [[Nan Li|AUTHOR Nan Li]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Hiroshi Seki|AUTHOR Hiroshi Seki]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^NICT, Japan; ^^3^^Huiyan Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2412–2416&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a novel attention mechanism-based spectrograms fusion system with minimum difference masks (MDMs) estimation for singing voice extraction. Compared with previous works that use a fully connected neural network, our system takes advantage of the multi-head attention mechanism. Specifically, we 1) try a variety of embedding methods of multiple spectrograms as the input of attention mechanisms, which can provide multi-scale correlation information between adjacent frames in the spectrograms; 2) add a regular term to loss function to obtain better continuity of spectrogram; 3) use the phase of the linear fusion waveform to reconstruct the final waveform, which can reduce the impact of the inconsistent spectrogram. Experiments on the MIR-1K dataset show that our system consistently improves the quantitative evaluation by the perceptual evaluation of speech quality, signal-to-distortion ratio, signal-to-interference ratio, and signal-to-artifact ratio.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Feng Deng|AUTHOR Feng Deng]], [[Tao Jiang|AUTHOR Tao Jiang]], [[Xiao-Rui Wang|AUTHOR Xiao-Rui Wang]], [[Chen Zhang|AUTHOR Chen Zhang]], [[Yan Li|AUTHOR Yan Li]]
</p><p class="cpabstractcardaffiliationlist">Kuaishou Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2457–2461&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For single channel speech enhancement, contextual information is very important for accurate speech estimation. In this paper, to capture long-term temporal contexts, we treat speech enhancement as a sequence-to-sequence mapping problem, and propose a noise-aware attention-gated network (NAAGN) for speech enhancement. Firstly, by incorporating deep residual learning and dilated convolutions into U-Net architecture, we present a deep residual U-net (ResUNet), which significantly expand receptive fields to aggregate context information systematically. Secondly, the attention-gated (AG) network is integrated into the ResUNet architecture with minimal computational overhead while furtherly increasing the long-term contexts sensitivity and prediction accuracy. Thirdly, we propose a novel noise-aware multi-task loss function, named weighted mean absolute error (WMAE) loss, in which both speech estimation loss and noise prediction loss are taken into consideration. Finally, the proposed NAAGN model was evaluated on the Voice Bank corpus and DEMAND database, which have been widely applied for speech enhancement by lots of deep learning models. Experimental results indicate that the proposed NAAGN method can achieve a larger segmental SNR improvement, a better speech quality and a higher speech intelligibility than reference methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yen-Ju Lu|AUTHOR Yen-Ju Lu]]^^1^^, [[Chien-Feng Liao|AUTHOR Chien-Feng Liao]]^^1^^, [[Xugang Lu|AUTHOR Xugang Lu]]^^2^^, [[Jeih-weih Hung|AUTHOR Jeih-weih Hung]]^^3^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica; ^^2^^NICT, Japan; ^^3^^National Chi Nan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2417–2421&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In noisy conditions, knowing speech contents facilitates listeners to more effectively suppress background noise components and to retrieve pure speech signals. Previous studies have also confirmed the benefits of incorporating phonetic information in a speech enhancement (SE) system to achieve better denoising performance. To obtain the phonetic information, we usually prepare a phoneme-based acoustic model, which is trained using speech waveforms and phoneme labels. Despite performing well in normal noisy conditions, when operating in very noisy conditions, however, the recognized phonemes may be erroneous and thus misguide the SE process. To overcome the limitation, this study proposes to incorporate the broad phonetic class (BPC) information into the SE process. We have investigated three criteria to build the BPC, including two knowledge-based criteria: place and manner of articulatory and one data-driven criterion. Moreover, the recognition accuracies of BPCs are much higher than that of phonemes, thus providing more accurate phonetic information to guide the SE process under very noisy conditions. Experimental results demonstrate that the proposed SE with the BPC information framework can achieve notable performance improvements over the baseline system and an SE system using monophonic information in terms of both speech quality intelligibility on the TIMIT dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andong Li|AUTHOR Andong Li]], [[Chengshi Zheng|AUTHOR Chengshi Zheng]], [[Cunhang Fan|AUTHOR Cunhang Fan]], [[Renhua Peng|AUTHOR Renhua Peng]], [[Xiaodong Li|AUTHOR Xiaodong Li]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2422–2426&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For continuous speech processing, dynamic attention is helpful in preferential processing, which has already been shown by the auditory dynamic attending theory. Accordingly, we propose a framework combining dynamic attention and recursive learning together called DARCN for monaural speech enhancement. Apart from a major noise reduction network, we design a separated sub-network, which adaptively generates the attention distribution to control the information flow throughout the major network. Recursive learning is introduced to dynamically reduce the number of trainable parameters by reusing a network for multiple stages, where the intermediate output in each stage is refined with a memory mechanism. By doing so, a more flexible and better estimation can be obtained. We conduct experiments on TIMIT corpus. Experimental results show that the proposed architecture obtains consistently better performance than recent state-of-the-art models in terms of both PESQ and STOI scores.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongjiang Yu|AUTHOR Hongjiang Yu]]^^1^^, [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]]^^1^^, [[Yuhong Yang|AUTHOR Yuhong Yang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Concordia University, Canada; ^^2^^Wuhan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2427–2431&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement has found many applications concerning robust speech processing. A masking based algorithm, as an important method of speech enhancement, aims to retain the speech dominant components and suppress the noise dominant parts of the noisy speech. In this paper, we derive a new type of mask: constrained ratio mask (CRM), which can better control the trade-off between speech distortion and residual noise in the enhanced speech. A deep neural network (DNN) is then employed for CRM estimation in noisy conditions. The estimated CRM is finally applied to the noisy speech for denoising. Experimental results show that the enhanced speech from the new masking scheme yields an improved speech quality over three existing masks under various noisy conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chi-Chang Lee|AUTHOR Chi-Chang Lee]]^^1^^, [[Yu-Chen Lin|AUTHOR Yu-Chen Lin]]^^1^^, [[Hsuan-Tien Lin|AUTHOR Hsuan-Tien Lin]]^^1^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^2^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University; ^^2^^Academia Sinica</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2432–2436&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Numerous noise adaptation techniques have been proposed to fine-tune deep-learning models in speech enhancement (SE) for mismatched noise environments. Nevertheless, adaptation to a new environment may lead to catastrophic forgetting of the previously learned environments. The catastrophic forgetting issue degrades the performance of SE in real-world embedded devices, which often revisit previous noise environments. The nature of embedded devices does not allow solving the issue with additional storage of all pre-trained models or earlier training data. In this paper, we propose a regularization-based incremental learning SE (SERIL) strategy, complementing existing noise adaptation strategies without using additional storage. With a regularization constraint, the parameters are updated to the new noise environment while retaining the knowledge of the previous noise environments. The experimental results show that, when faced with a new noise domain, the SERIL model outperforms the unadapted SE model. Meanwhile, compared with the current adaptive technique based on fine-tuning, the SERIL model can reduce the forgetting of previous noise environments by 52%. The results verify that the SERIL model can effectively adjust itself to new noise environments while overcoming the catastrophic forgetting issue. The results make SERIL a favorable choice for real-world SE applications, where the noise environment changes frequently.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yoshiaki Bando|AUTHOR Yoshiaki Bando]]^^1^^, [[Kouhei Sekiguchi|AUTHOR Kouhei Sekiguchi]]^^2^^, [[Kazuyoshi Yoshii|AUTHOR Kazuyoshi Yoshii]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^AIST, Japan; ^^2^^RIKEN, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2437–2441&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a neural speech enhancement method that has a statistical feedback mechanism based on a denoising variational autoencoder (VAE). Deep generative models of speech signals have been combined with unsupervised noise models for enhancing speech robustly regardless of the condition mismatch from the training data. This approach, however, often yields unnatural speech-like noise due to the unsuitable prior distribution on the latent speech representations. To mitigate this problem, we use a denoising VAE whose encoder estimates the latent vectors of clean speech from an input mixture signal. This encoder network is utilized as a prior distribution of the probabilistic generative model of the input mixture, and its condition mismatch is handled in a Bayesian manner. The speech signal is estimated by updating the latent vectors to fit the input mixture while noise is estimated by a nonnegative matrix factorization model. To efficiently train the encoder network, we also propose a multi-task learning of the denoising VAE with the standard mask-based enhancement. The experimental results show that our method outperforms the existing mask-based and generative enhancement methods in unknown conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ahmet E. Bulut|AUTHOR Ahmet E. Bulut]]^^1^^, [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2442–2446&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech signal reverberation due to reflections in a physical obstacle is one of the main difficulties in speech processing as well as the presence of non-stationary background noise. In this study we explore DNN-based single-channel speech dereverberation with state-of-the-art performance comparisons. We propose a CNN auto-encoder architecture with skip connections focusing on real-time and low-latency applications. The proposed system is evaluated with the REVERB challenge dataset that includes simulated and real reverberated speech samples. Our experimental results show that the proposed system has superior results on the challenge evaluation dataset as opposed to a baseline system that uses deep neural network (DNN) based weighted prediction error (WPE) algorithm. We also extend the comparison with state of the art systems in terms of most commonly used objective metrics and our system achieves better results in the most of objective metrics. Moreover a latency analysis of the proposed system is performed and trade-off between processing time and performance is examined.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dung N. Tran|AUTHOR Dung N. Tran]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2447–2451&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In data-driven speech enhancement frameworks, learning informative representations is crucial to obtain a high-quality estimate of the target speech. State-of-the-art speech enhancement methods based on deep neural networks (DNN) commonly learn a single embedding from the noisy input to predict clean speech. This compressed representation inevitably contains both noise and speech information leading to speech distortion and poor noise reduction performance. To alleviate this issue, we proposed to learn from the noisy input separate embeddings for speech and noise and introduced a subspace affinity loss function to prevent information leaking between the two representations. We rigorously proved that minimizing this loss function yields maximally uncorrelated speech and noise representations, which can block information leaking. We empirically showed that our proposed framework outperforms traditional and state-of-the-art speech enhancement methods in various unseen nonstationary noise environments. Our results suggest that learning uncorrelated speech and noise embeddings can improve noise reduction and reduces speech distortion in speech enhancement applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haoyu Li|AUTHOR Haoyu Li]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]
</p><p class="cpabstractcardaffiliationlist">NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2452–2456&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, speech enhancement (SE) has achieved impressive progress with the success of deep neural networks (DNNs). However, the DNN approach usually fails to generalize well to unseen environmental noise that is not included in the training. To address this problem, we propose “noise tokens” (NTs), which are a set of neural noise templates that are jointly trained with the SE system. NTs dynamically capture the environment variability and thus enable the DNN model to handle various environments to produce STFT magnitude with higher quality. Experimental results show that using NTs is an effective strategy that consistently improves the generalization ability of SE systems across different DNN architectures. Furthermore, we investigate applying a state-of-the-art neural vocoder to generate waveform instead of traditional inverse STFT (ISTFT). Subjective listening tests show the residual noise can be significantly suppressed through mel-spectrogram correction and vocoder-based waveform synthesis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anastassia Loukina|AUTHOR Anastassia Loukina]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Matthew Mulholland|AUTHOR Matthew Mulholland]], [[Ian Blood|AUTHOR Ian Blood]], [[Klaus Zechner|AUTHOR Klaus Zechner]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1942–1946&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The COVID-19 pandemic has led to a dramatic increase in the use of face masks worldwide. Face coverings can affect both acoustic properties of the signal as well as speech patterns and have unintended effects if the person wearing the mask attempts to use speech processing technologies. In this paper we explore the impact of wearing face masks on the automated assessment of English language proficiency. We use a dataset from a large-scale speaking test for which test-takers were required to wear face masks during the test administration, and we compare it to a matched control sample of test-takers who took the same test before the mask requirements were put in place. We find that the two samples differ across a range of acoustic measures and also show a small but significant difference in speech patterns. However, these differences do not lead to differences in human or automated scores of English language proficiency. Several measures of bias showed no differences in scores between the two groups.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ximin Li|AUTHOR Ximin Li]], [[Xiaodong Wei|AUTHOR Xiaodong Wei]], [[Xiaowei Qin|AUTHOR Xiaowei Qin]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1987–1991&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword Spotting (KWS) plays a vital role in human-computer interaction for smart on-device terminals and service robots. It remains challenging to achieve the trade-off between small footprint and high accuracy for KWS task. In this paper, we explore the application of multi-scale temporal modeling to the small-footprint keyword spotting task. We propose a multi-branch temporal convolution module (MTConv), a CNN block consisting of multiple temporal convolution filters with different kernel sizes, which enriches temporal feature space. Besides, taking advantage of temporal and depthwise convolution, a temporal efficient neural network (TENet) is designed for KWS system¹. Based on the purposed model, we replace standard temporal convolution layers with MTConvs that can be trained for better performance. While at the inference stage, the MTConv can be equivalently converted to the base convolution architecture, so that no extra parameters and computational costs are added compared to the base model. The results on Google Speech Command Dataset show that one of our models trained with MTConv performs the accuracy of 96.8% with only 100K parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohamed Mhiri|AUTHOR Mohamed Mhiri]], [[Samuel Myer|AUTHOR Samuel Myer]], [[Vikrant Singh Tomar|AUTHOR Vikrant Singh Tomar]]
</p><p class="cpabstractcardaffiliationlist">Fluent.ai, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1947–1951&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, developing a speech understanding system that classifies a waveform to structured data, such as intents and slots, without first transcribing the speech to text has emerged as an interesting research problem. This work proposes such as system with an additional constraint of designing a system that has a small enough footprint to run on small micro-controllers and embedded systems with minimal latency. Given a streaming input speech signal, the proposed system can process it segment-by-segment without the need to have the entire stream at the moment of processing. The proposed system is evaluated on the publicly available Fluent Speech Commands dataset. Experiments show that the proposed system yields state-of-the-art performance with the advantage of low latency and a much smaller model when compared to other published works on the same task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joe Wang|AUTHOR Joe Wang]], [[Rajath Kumar|AUTHOR Rajath Kumar]], [[Mike Rodehorst|AUTHOR Mike Rodehorst]], [[Brian Kulis|AUTHOR Brian Kulis]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1952–1956&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose an audio-based wakeword-independent verification model to determine whether a wakeword spotting model correctly woke and should respond or incorrectly woke and should not respond. Our model works on any wakeword-initiated audio, independent of the wakeword by operating only on the audio surrounding the wakeword, yielding a wakeword agnostic model. This model is based on two key assumptions: that audio surrounding the wakeword is informative to determine if the user intended to wake the device and that this audio is independent of the wakeword itself. We show experimentally that on wakewords not included in the training set, our model trained without examples or knowledge of the wakeword is able to achieve verification performance comparable to models trained on 5,000 to 10,000 annotated examples of the new wakeword.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tyler Vuong|AUTHOR Tyler Vuong]], [[Yangyang Xia|AUTHOR Yangyang Xia]], [[Richard M. Stern|AUTHOR Richard M. Stern]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1957–1961&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice Type Discrimination (VTD) refers to discrimination between regions in a recording where speech was produced by speakers that are physically within proximity of the recording device (“Live Speech”) from speech and other types of audio that were played back such as traffic noise and television broadcasts (“Distractor Audio”). In this work, we propose a deep-learning-based VTD system that features an initial layer of learnable spectro-temporal receptive fields (STRFs). Our approach is also shown to provide very strong performance on a similar spoofing detection task in the ASVspoof 2019 challenge. We evaluate our approach on a new standardized VTD database that was collected to support research in this area. In particular, we study the effect of using learnable STRFs compared to static STRFs or unconstrained kernels. We also show that our system consistently improves a competitive baseline system across a wide range of signal-to-noise ratios on spoofing detection in the presence of VTD distractor noise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuo-Yiin Chang|AUTHOR Shuo-Yiin Chang]], [[Bo Li|AUTHOR Bo Li]], [[David Rybach|AUTHOR David Rybach]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Wei Li|AUTHOR Wei Li]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Trevor Strohman|AUTHOR Trevor Strohman]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1962–1966&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Latency is a crucial metric for streaming speech recognition systems. In this paper, we reduce latency by fetching responses early based on the //partial// recognition results and refer to it as //prefetching//. Specifically, prefetching works by submitting partial recognition results for subsequent processing such as obtaining assistant server responses or second-pass rescoring //before// the recognition result is finalized. If the partial result matches the final recognition result, the early fetched response can be delivered to the user instantly. This effectively speeds up the system by saving the execution latency that typically happens after recognition is completed.

Prefetching can be triggered multiple times for a single query, but this leads to multiple rounds of downstream processing and increases the computation costs. It is hence desirable to fetch the result sooner while limiting the number of prefetches. To achieve the best trade-off between latency and computation cost, we investigated a series of prefetching decision models, including decoder-silence-based prefetching, acoustic-silence-based prefetching, and end-to-end prefetching.

In this paper, we demonstrate that the proposed prefetching mechanism reduces latency by ~200 ms for a system that consists of a streaming first-pass model using a recurrent neural network transducer and a non-streaming second-pass rescoring model using Listen, Attend and Spell. We observe that end-to-end prefetching provides the best trade-off between cost and latency and is 120 ms faster than silence-based prefetching at a fixed prefetch rate.</p></div>
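To make the prefetching idea concrete, here is a minimal, hypothetical sketch of a silence-based trigger: once enough trailing silence is observed, the current partial hypothesis is submitted for downstream processing and cached, so a matching final result can be answered immediately. All names, thresholds, and the downstream call are placeholders, not the system described in the paper.

```python
# Illustrative sketch of a silence-based prefetching trigger (not the paper's system).

def fetch_response(text):
    """Stand-in for the expensive downstream call (assistant server, rescoring)."""
    return f"response for: {text}"

def maybe_prefetch(partial_hypothesis, trailing_silence_ms, cache,
                   silence_threshold_ms=300):
    """Submit the partial result early once enough trailing silence is seen;
    the cache also caps the number of prefetches per distinct hypothesis."""
    if trailing_silence_ms >= silence_threshold_ms and partial_hypothesis not in cache:
        cache[partial_hypothesis] = fetch_response(partial_hypothesis)  # early fetch
    return cache

def on_final_result(final_hypothesis, cache):
    # A cache hit means the early-fetched response is delivered with no extra wait;
    # otherwise the system falls back to fetching after recognition finalizes.
    return cache.get(final_hypothesis) or fetch_response(final_hypothesis)

cache = {}
maybe_prefetch("play some jazz", trailing_silence_ms=350, cache=cache)
print(on_final_result("play some jazz", cache))
```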
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingsong Wang|AUTHOR Jingsong Wang]]^^1^^, [[Tom Ko|AUTHOR Tom Ko]]^^2^^, [[Zhen Xu|AUTHOR Zhen Xu]]^^1^^, [[Xiawei Guo|AUTHOR Xiawei Guo]]^^1^^, [[Souxiang Liu|AUTHOR Souxiang Liu]]^^1^^, [[Wei-Wei Tu|AUTHOR Wei-Wei Tu]]^^1^^, [[Lei Xie|AUTHOR Lei Xie]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^4Paradigm, China; ^^2^^SUSTech, China; ^^3^^Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1967–1971&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The AutoSpeech challenge calls for automated machine learning (AutoML) solutions to automate the process of applying machine learning to speech processing tasks. These tasks, which cover a large variety of domains, will be shown to the automated system in a random order. Each time when the tasks are switched, the information of the new task will be hinted with its corresponding training set. Thus, every submitted solution should contain an adaptation routine which adapts the system to the new task. Compared to the first edition, the 2020 edition includes advances of 1) more speech tasks, 2) noisier data in each task, 3) a modified evaluation metric. This paper outlines the challenge and describe the competition protocol, datasets, evaluation metric, starting kit, and baseline systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rajath Kumar|AUTHOR Rajath Kumar]], [[Mike Rodehorst|AUTHOR Mike Rodehorst]], [[Joe Wang|AUTHOR Joe Wang]], [[Jiacheng Gu|AUTHOR Jiacheng Gu]], [[Brian Kulis|AUTHOR Brian Kulis]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1972–1976&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Wakeword detection is responsible for switching on downstream systems in a voice-activated device. To prevent a response when the wakeword is detected by mistake, a secondary network is often utilized to verify the detected wakeword. Published verification approaches are formulated based on Automatic Speech Recognition (ASR) biased towards the wakeword. This approach has several drawbacks, including high model complexity and the necessity of large vocabulary training data. To address these shortcomings, we propose to use a large receptive field (LRF) word-level wakeword model, and in particular, a convolutional-recurrent-attention (CRA) network. CRA networks use a strided small receptive field convolutional front-end followed by fixed time-step recurrent layers optimized to model the temporal phonetic dependencies within the wakeword. We experimentally show that this type of modeling helps the system to be robust to errors in the location of the wakeword as estimated by the detection network. The proposed CRA network significantly outperforms previous baselines, including an LRF whole-word convolutional network and a 2-stage DNN-HMM system. Additionally, we study the importance of pre- and post-wakeword context. Finally, the CRA network has significantly fewer model parameters and multiplies, which makes it suitable for real-world production applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Kyosuke Nishida|AUTHOR Kyosuke Nishida]], [[Masahiro Yasuda|AUTHOR Masahiro Yasuda]], [[Shoichiro Saito|AUTHOR Shoichiro Saito]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1977–1981&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the problems with automated audio captioning (AAC) is the indeterminacy in word selection corresponding to the audio event/scene. Since one acoustic event/scene can be described with several words, it results in a combinatorial explosion of possible captions and difficulty in training. To solve this problem, we propose a Transformer-based audio-captioning model with keyword estimation called //TRACKE//. It simultaneously solves the word-selection indeterminacy problem with the main task of AAC while executing the sub-task of acoustic event detection/acoustic scene classification (i.e., keyword estimation). TRACKE estimates keywords, which comprise a word set corresponding to audio events/scenes in the input audio, and generates the caption while referring to the estimated keywords to reduce word-selection indeterminacy. Experimental results on a public AAC dataset indicate that TRACKE achieved state-of-the-art performance and successfully estimated both the caption and its keywords.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tong Mo|AUTHOR Tong Mo]]^^1^^, [[Yakun Yu|AUTHOR Yakun Yu]]^^1^^, [[Mohammad Salameh|AUTHOR Mohammad Salameh]]^^2^^, [[Di Niu|AUTHOR Di Niu]]^^1^^, [[Shangling Jui|AUTHOR Shangling Jui]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Alberta, Canada; ^^2^^Huawei Technologies, Canada; ^^3^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1982–1986&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks have recently become a popular solution to keyword spotting systems, which enable the control of smart devices via voice. In this paper, we apply neural architecture search to search for convolutional neural network models that can help boost the performance of keyword spotting based on features extracted from acoustic signals while maintaining an acceptable memory footprint. Specifically, we use differentiable architecture search techniques to search for operators and their connections in a predefined cell search space. The found cells are then scaled up in both depth and width to achieve competitive performance. We evaluated the proposed method on Google’s Speech Commands Dataset and achieved a state-of-the-art accuracy of over 97% on the setting of 12-class utterance classification commonly reported in the literature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xin Wang|AUTHOR Xin Wang]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]
</p><p class="cpabstractcardaffiliationlist">NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1992–1996&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural source-filter (NSF) waveform models generate speech waveforms by morphing sine-based source signals through dilated convolution in the time domain. Although the sine-based source signals help the NSF models to produce voiced sounds with specified pitch, the sine shape may constrain the generated waveform when the target voiced sounds are less periodic. In this paper, we propose a more flexible source signal called cyclic noise, a quasi-periodic noise sequence given by the convolution of a pulse train and a static random noise with a trainable decaying rate that controls the signal shape. We further propose a masked spectral loss to guide the NSF models to produce periodic voiced sounds from the cyclic noise-based source signal. Results from a large-scale listening test demonstrated the effectiveness of the cyclic noise and the masked spectral loss on speaker-independent NSF models in copy-synthesis experiments on the CMU ARCTIC database.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mano Ranjith Kumar M.|AUTHOR Mano Ranjith Kumar M.]], [[Sudhanshu Srivastava|AUTHOR Sudhanshu Srivastava]], [[Anusha Prakash|AUTHOR Anusha Prakash]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]
</p><p class="cpabstractcardaffiliationlist">IIT Madras, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2037–2041&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conventional text-to-speech (TTS) synthesis requires extensive linguistic processing for producing quality output. The advent of end-to-end (E2E) systems has caused a relocation in the paradigm with better synthesized voices. However, hidden Markov model (HMM) based systems are still popular due to their fast synthesis time, robustness to less training data, and flexible adaptation of voice characteristics, speaking styles, and emotions.

This paper proposes a technique that combines the classical parametric HMM-based TTS framework (HTS) with the neural-network-based Waveglow vocoder using histogram equalization (HEQ) in a low-resource environment. The two paradigms are combined by performing HEQ across mel-spectrograms extracted from HTS-generated audio and the source spectra of the training data. During testing, the synthesized mel-spectrograms are mapped to the source spectrograms using the learned HEQ. Experiments are carried out on the Hindi male and female datasets of the Indic TTS database. Systems are evaluated based on degradation mean opinion scores (DMOS). Results indicate that the synthesis quality of the hybrid system is better than that of the conventional HTS system. These results are quite promising as they pave the way to good-quality TTS systems that need less data than E2E systems.</p></div>
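Histogram equalization of this kind can be viewed as quantile matching between two distributions of mel-spectrogram values. The following sketch shows that generic mapping (learn source and target quantiles, then map through the source CDF and the inverse target CDF); whether the paper applies it globally or per mel band is not stated in the abstract, so the granularity here is an assumption.

```python
# Minimal sketch of histogram equalization (HEQ) via quantile matching.
import numpy as np

def learn_heq(source_vals, target_vals, n_quantiles=100):
    q = np.linspace(0.0, 1.0, n_quantiles)
    return np.quantile(source_vals, q), np.quantile(target_vals, q)

def apply_heq(x, src_q, tgt_q):
    # Map each value through the source CDF, then through the inverse target CDF.
    return np.interp(x, src_q, tgt_q)

# Example with stand-in data: map synthetic mel values toward a target distribution.
synth_vals = np.random.randn(10000) * 0.8 - 0.5
natural_vals = np.random.randn(10000)
src_q, tgt_q = learn_heq(synth_vals, natural_vals)
equalized = apply_heq(synth_vals[:100], src_q, tgt_q)
```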
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jen-Yu Liu|AUTHOR Jen-Yu Liu]]^^1^^, [[Yu-Hua Chen|AUTHOR Yu-Hua Chen]]^^1^^, [[Yin-Cheng Yeh|AUTHOR Yin-Cheng Yeh]]^^1^^, [[Yi-Hsuan Yang|AUTHOR Yi-Hsuan Yang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Taiwan AI Labs; ^^2^^Taiwan AI Labs</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1997–2001&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a recent paper, we have presented a generative adversarial network (GAN)-based model for unconditional generation of the mel-spectrograms of singing voices. As the generator of the model is designed to take a variable-length sequence of noise vectors as input, it can generate mel-spectrograms of variable length. However, our previous listening test shows that the quality of the generated audio leaves room for improvement. The present paper extends and expands that previous work in the following aspects. First, we employ a hierarchical architecture in the generator to induce some structure in the temporal dimension. Second, we introduce a cycle regularization mechanism to the generator to avoid mode collapse. Third, we evaluate the performance of the new model not only for generating singing voices, but also for generating speech voices. Evaluation result shows that new model outperforms the prior one both objectively and subjectively. We also employ the model to unconditionally generate sequences of piano and violin music and find the result promising. Audio examples, as well as the code for implementing our model, will be publicly available online upon paper publication.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Toru Nakashika|AUTHOR Toru Nakashika]]
</p><p class="cpabstractcardaffiliationlist">University of Electro-Communications, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2002–2006&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, variational autoencoders (VAEs) have been attracting interest for many applications and generative tasks. Although the VAE is one of the most powerful deep generative models, it still has difficulty representing complex-valued data such as the complex spectra of speech. In speech synthesis, we usually use the VAE to encode Mel-cepstra, or raw amplitude spectra, from a speech signal into normally distributed latent features and then synthesize the speech from the reconstruction by using the Griffin-Lim algorithm or other vocoders. Such inputs are originally calculated from complex spectra but lack the phase information, which leads to degradation when recovering speech. In this work, we propose a novel generative model to directly encode the complex spectra by extending the conventional VAE. The proposed model, which we call the complex-valued VAE (CVAE), consists of two complex-valued neural networks (CVNNs) of an encoder and a decoder. In the CVAE, not only the inputs and the parameters of the encoder and decoder but also the latent features are defined as complex-valued to preserve the phase information throughout the network. The results of our speech encoding experiments demonstrated the effectiveness of the CVAE compared to the conventional VAE in both objective and subjective criteria.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seungwoo Choi|AUTHOR Seungwoo Choi]], [[Seungju Han|AUTHOR Seungju Han]], [[Dongyoung Kim|AUTHOR Dongyoung Kim]], [[Sungjoo Ha|AUTHOR Sungjoo Ha]]
</p><p class="cpabstractcardaffiliationlist">Hyperconnect, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2007–2011&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>On account of growing demands for personalization, the need for a so-called few-shot TTS system that clones speakers with only a few data is emerging. To address this issue, we propose Attentron, a few-shot TTS model that clones voices of speakers unseen during training. It introduces two special encoders, each serving different purposes. A fine-grained encoder extracts variable-length style information via an attention mechanism, and a coarse-grained encoder greatly stabilizes the speech synthesis, circumventing unintelligible gibberish even for synthesizing speech of unseen speakers. In addition, the model can scale out to an arbitrary number of reference audios to improve the quality of the synthesized speech. According to our experiments, including a human evaluation, the proposed model significantly outperforms state-of-the-art models when generating speech for unseen speakers in terms of speaker similarity and quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hyeong Rae Ihm|AUTHOR Hyeong Rae Ihm]], [[Joun Yeop Lee|AUTHOR Joun Yeop Lee]], [[Byoung Jin Choi|AUTHOR Byoung Jin Choi]], [[Sung Jun Cheon|AUTHOR Sung Jun Cheon]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2012–2016&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent End-to-end text-to-speech (TTS) systems based on the deep neural network (DNN) have shown the state-of-the-art performance on the speech synthesis field. Especially, the attention-based sequence-to-sequence models have improved the quality of the alignment between the text and spectrogram successfully. Leveraging such improvement, speech synthesis using a Transformer network was reported to generate humanlike speech audio. However, such sequence-to-sequence models require intensive computing power and memory during training. The attention scores are calculated over the entire key at every query sequence, which increases memory usage. To mitigate this issue, we propose Reformer-TTS, the model using a Reformer network which utilizes the locality-sensitive hashing attention and the reversible residual network. As a result, we show that the Reformer network consumes almost twice smaller memory margin as the Transformer, which leads to the fast convergence of training end-to-end TTS system. We demonstrate such advantages with memory usage, objective, and subjective performance evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuhiro Kaneko|AUTHOR Takuhiro Kaneko]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Kou Tanaka|AUTHOR Kou Tanaka]], [[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2017–2021&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Non-parallel voice conversion (VC) is a technique for learning mappings between source and target speeches without using a parallel corpus. Recently, cycle-consistent adversarial network (CycleGAN)-VC and CycleGAN-VC2 have shown promising results regarding this problem and have been widely used as benchmark methods. However, owing to the ambiguity of the effectiveness of CycleGAN-VC/VC2 for mel-spectrogram conversion, they are typically used for mel-cepstrum conversion even when comparative methods employ mel-spectrogram as a conversion target. To address this, we examined the applicability of CycleGAN-VC/VC2 to mel-spectrogram conversion. Through initial experiments, we discovered that their direct applications compromised the time-frequency structure that should be preserved during conversion. To remedy this, we propose CycleGAN-VC3, an improvement of CycleGAN-VC2 that incorporates time-frequency adaptive normalization (TFAN). Using TFAN, we can adjust the scale and bias of the converted features while reflecting the time-frequency structure of the source mel-spectrogram. We evaluated CycleGAN-VC3 on inter-gender and intra-gender non-parallel VC. A subjective evaluation of naturalness and similarity showed that for every VC pair, CycleGAN-VC3 outperforms or is competitive with the two types of CycleGAN-VC2, one of which was applied to mel-cepstrum and the other to mel-spectrogram.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nikolaos Ellinas|AUTHOR Nikolaos Ellinas]]^^1^^, [[Georgios Vamvoukakis|AUTHOR Georgios Vamvoukakis]]^^1^^, [[Konstantinos Markopoulos|AUTHOR Konstantinos Markopoulos]]^^1^^, [[Aimilios Chalamandaris|AUTHOR Aimilios Chalamandaris]]^^1^^, [[Georgia Maniati|AUTHOR Georgia Maniati]]^^1^^, [[Panos Kakoulidis|AUTHOR Panos Kakoulidis]]^^1^^, [[Spyros Raptis|AUTHOR Spyros Raptis]]^^1^^, [[June Sig Sung|AUTHOR June Sig Sung]]^^2^^, [[Hyoungmin Park|AUTHOR Hyoungmin Park]]^^2^^, [[Pirros Tsiakoulis|AUTHOR Pirros Tsiakoulis]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, Greece; ^^2^^Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2022–2026&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an end-to-end text-to-speech system with low latency on a CPU, suitable for real-time applications. The system is composed of an autoregressive attention-based sequence-to-sequence acoustic model and the LPCNet vocoder for waveform generation. An acoustic model architecture that adopts modules from both the Tacotron 1 and 2 models is proposed, while stability is ensured by using a recently proposed purely location-based attention mechanism, suitable for arbitrary sentence length generation. During inference, the decoder is unrolled and acoustic feature generation is performed in a streaming manner, allowing for a nearly constant latency which is independent from the sentence length. Experimental results show that the acoustic model can produce feature sequences with minimal latency about 31 times faster than real-time on a computer CPU and 6.5 times on a mobile CPU, enabling it to meet the conditions required for real-time applications on both devices. The full end-to-end system can generate almost natural quality speech, which is verified by listening tests.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chengzhu Yu|AUTHOR Chengzhu Yu]]^^1^^, [[Heng Lu|AUTHOR Heng Lu]]^^1^^, [[Na Hu|AUTHOR Na Hu]]^^2^^, [[Meng Yu|AUTHOR Meng Yu]]^^1^^, [[Chao Weng|AUTHOR Chao Weng]]^^1^^, [[Kun Xu|AUTHOR Kun Xu]]^^2^^, [[Peng Liu|AUTHOR Peng Liu]]^^2^^, [[Deyi Tuo|AUTHOR Deyi Tuo]]^^2^^, [[Shiyin Kang|AUTHOR Shiyin Kang]]^^2^^, [[Guangzhi Lei|AUTHOR Guangzhi Lei]]^^2^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, USA; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2027–2031&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a robust and effective speech synthesis system that generates highly natural speech. The key component of proposed system is Duration Informed Attention Network (DurIAN), an autoregressive model in which the alignments between the input text and the output acoustic features are inferred from a duration model. This is different from the attention mechanism used in existing end-to-end speech synthesis systems that accounts for various unavoidable artifacts. To improve the audio generation efficiency of neural vocoders, we also propose a multi-band audio generation framework exploiting the sparseness characteristics of neural network. With proposed multi-band processing framework, the total computational complexity of WaveRNN model can be effectively reduced from 9.8 to 3.6 GFLOPS without any performance loss. Finally, we show that proposed DurIAN system could generate highly natural speech that is on par with current state of the art end-to-end systems, while being robust and stable at the same time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kentaro Mitsui|AUTHOR Kentaro Mitsui]], [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2032–2036&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-speaker speech synthesis is a technique for modeling multiple speakers’ voices with a single model. Although many approaches using deep neural networks (DNNs) have been proposed, DNNs are prone to overfitting when the amount of training data is limited. We propose a framework for multi-speaker speech synthesis using deep Gaussian processes (DGPs); a DGP is a deep architecture of Bayesian kernel regressions and thus robust to overfitting. In this framework, speaker information is fed to duration/acoustic models using speaker codes. We also examine the use of deep Gaussian process latent variable models (DGPLVMs). In this approach, the representation of each speaker is learned simultaneously with other model parameters, and therefore the similarity or dissimilarity of speakers is considered efficiently. We experimentally evaluated two situations to investigate the effectiveness of the proposed methods. In one situation, the amount of data from each speaker is balanced (speaker-balanced), and in the other, the data from certain speakers are limited (speaker-imbalanced). Subjective and objective evaluation results showed that both the DGP and DGPLVM synthesize multi-speaker speech more effective than a DNN in the speaker-balanced situation. We also found that the DGPLVM outperforms the DGP significantly in the speaker-imbalanced situation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Chaojun Liu|AUTHOR Chaojun Liu]], [[Yifan Gong|AUTHOR Yifan Gong]], [[Jian Wu|AUTHOR Jian Wu]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2107–2111&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we develop a simple, efficient, and compact automatic speech recognition (ASR) model based on purely 1-dimensional row convolution (RC) operation. We refer to our proposed model as 1-dim row-convolution LSTM (RC-LSTM), where we embed limited future information to standard UniLSTMs in 1-dim RC operation. We target fast streaming ASR solutions and establish ASR accuracy parity with latency-control bidirectional-LSTM (LC-BLSTM). We develop an application of future information at ASR features and hidden layer stages. We study connections with related techniques, analyze tradeoffs and recommend uniform future lookahead to all hidden layers. We argue that our architecture implicitly factorizes training into orthogonal time and “frequency” dimensions for an effective learning on large scale tasks. We conduct a series of experiments on medium scale with 6k hrs of English corpus, as well as, large scale with 60k hrs training. We demonstrate our findings across unified ASR tasks. Compared to UniLSTM model, RC-LSTM achieved 16% relative reduction in word error rate (WER). RC-LSTM also achieved accuracy parity with LC-BLSTM on large scale tasks at significantly lower latency and computational cost.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vikas Joshi|AUTHOR Vikas Joshi]]^^1^^, [[Rui Zhao|AUTHOR Rui Zhao]]^^2^^, [[Rupesh R. Mehta|AUTHOR Rupesh R. Mehta]]^^1^^, [[Kshitiz Kumar|AUTHOR Kshitiz Kumar]]^^2^^, [[Jinyu Li|AUTHOR Jinyu Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Microsoft, India; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2152–2156&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transfer learning (TL) is widely used in conventional hybrid automatic speech recognition (ASR) system, to transfer the knowledge from source to target language. TL can be applied to end-to-end (E2E) ASR system such as recurrent neural network transducer (RNN-T) models, by initializing the encoder and/or prediction network of the target language with the pre-trained models from source language. In the hybrid ASR system, transfer learning is typically done by initializing the target language acoustic model (AM) with source language AM. Several transfer learning strategies exist in the case of the RNN-T framework, depending upon the choice of the initialization model for encoder and prediction networks. This paper presents a comparative study of four different TL methods for RNN-T framework. We show 10%–17% relative word error rate reduction with different TL methods over randomly initialized RNN-T model. We also study the impact of TL with varying amount of training data ranging from 50 hours to 1000 hours and show the efficacy of TL for languages with a very small amount of training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chengyi Wang|AUTHOR Chengyi Wang]]^^1^^, [[Yu Wu|AUTHOR Yu Wu]]^^2^^, [[Liang Lu|AUTHOR Liang Lu]]^^3^^, [[Shujie Liu|AUTHOR Shujie Liu]]^^2^^, [[Jinyu Li|AUTHOR Jinyu Li]]^^3^^, [[Guoli Ye|AUTHOR Guoli Ye]]^^3^^, [[Ming Zhou|AUTHOR Ming Zhou]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nankai University, China; ^^2^^Microsoft, China; ^^3^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2112–2116&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The attention-based Transformer model has achieved promising results for speech recognition (SR) in the offline mode. However, in the streaming mode, the Transformer model usually incurs significant latency to maintain its recognition accuracy when applying a fixed-length look-ahead window in each encoder layer. In this paper, we propose a novel low-latency streaming approach for Transformer models, which consists of a scout network and a recognition network. The scout network detects the whole word boundary without seeing any future frames, while the recognition network predicts the next subword by utilizing the information from all the frames before the predicted boundary. Our model achieves the best performance (2.7/6.4 WER) with only an average of 639 ms latency on the test-clean and test-other data sets of Librispeech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gakuto Kurata|AUTHOR Gakuto Kurata]]^^1^^, [[George Saon|AUTHOR George Saon]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, Japan; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2117–2121&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end training of recurrent neural network transducers (RNN-Ts) does not require frame-level alignments between audio and output symbols. Because of that, the posterior lattices defined by the predictive distributions from different RNN-Ts trained on the same data can differ a lot, which poses a new set of challenges in knowledge distillation between such models. These discrepancies are especially prominent in the posterior lattices between an offline model and a streaming model, which can be expected from the fact that the streaming RNN-T emits symbols later than the offline RNN-T. We propose a method to train an RNN-T so that the posterior peaks at each node in the posterior lattice are aligned with the ones from a pretrained model for the same utterance. By utilizing this method, we can train an offline RNN-T that can serve as a good teacher to train a student streaming RNN-T. Experimental results on the standard Switchboard conversational telephone speech corpus demonstrate accuracy improvements for a streaming unidirectional RNN-T by knowledge distillation from an offline bidirectional counterpart.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Li|AUTHOR Wei Li]], [[James Qin|AUTHOR James Qin]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Yanzhang He|AUTHOR Yanzhang He]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2122–2126&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances of end-to-end models have outperformed conventional models through employing a two-pass model. The two-pass model provides better speed-quality trade-offs for on-device speech recognition, where a 1//st//-pass model generates hypotheses in a streaming fashion, and a 2//nd//-pass model rescores the hypotheses with full audio sequence context. The 2//nd//-pass model plays a key role in the quality improvement of the end-to-end model to surpass the conventional model. One main challenge of the two-pass model is the computation latency introduced by the 2//nd//-pass model. Specifically, the original design of the two-pass model uses LSTMs for the 2//nd//-pass model, which are subject to long latency as they are constrained by the recurrent nature and have to run inference sequentially. In this work we explore replacing the LSTM layers in the 2//nd//-pass rescorer with Transformer layers, which can process the entire hypothesis sequences //in parallel// and can therefore utilize the on-device computation resources more efficiently. Compared with an LSTM-based baseline, our proposed Transformer rescorer achieves more than 50% latency reduction with quality improvement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pau Baquero-Arnal|AUTHOR Pau Baquero-Arnal]], [[Javier Jorge|AUTHOR Javier Jorge]], [[Adrià Giménez|AUTHOR Adrià Giménez]], [[Joan Albert Silvestre-Cerdà|AUTHOR Joan Albert Silvestre-Cerdà]], [[Javier Iranzo-Sánchez|AUTHOR Javier Iranzo-Sánchez]], [[Albert Sanchis|AUTHOR Albert Sanchis]], [[Jorge Civera|AUTHOR Jorge Civera]], [[Alfons Juan|AUTHOR Alfons Juan]]
</p><p class="cpabstractcardaffiliationlist">Universidad Politécnica de Valencia, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2127–2131&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Streaming ASR is gaining momentum due to its wide applicability, though it is still unclear how best to come close to the accuracy of state-of-the-art off-line ASR systems when the output must come within a short delay after the incoming audio stream. Following our previous work on streaming one-pass decoding with hybrid ASR systems and LSTM language models, in this work we report further improvements by replacing LSTMs with Transformer models. First, two key ideas are discussed so as to run these models fast during inference. Then, empirical results on LibriSpeech and TED-LIUM are provided showing that Transformer language models lead to improved recognition rates on both tasks. ASR systems obtained in this work can be seamlessly transferred to a streaming setup with minimal quality losses. Indeed, to the best of our knowledge, no better results have been reported on these tasks when assessed under a streaming setup.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chunyang Wu|AUTHOR Chunyang Wu]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Yangyang Shi|AUTHOR Yangyang Shi]], [[Ching-Feng Yeh|AUTHOR Ching-Feng Yeh]], [[Frank Zhang|AUTHOR Frank Zhang]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2132–2136&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transformer-based acoustic modeling has achieved great success for both hybrid and sequence-to-sequence speech recognition. However, it requires access to the full sequence, and the computational cost grows quadratically with respect to the input sequence length. These factors limit its adoption for streaming applications. In this work, we proposed a novel augmented memory self-attention, which attends on a short segment of the input sequence and a bank of memories. The memory bank stores the embedding information for all the processed segments. On the librispeech benchmark, our proposed method outperforms all the existing streamable transformer methods by a large margin and achieved over 15% relative error reduction, compared with the widely used LC-BLSTM baseline. Our findings are also confirmed on some large internal datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2137–2141&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate a monotonic multihead attention (MMA) by extending hard monotonic attention to Transformer-based automatic speech recognition (ASR) for online streaming applications. For streaming inference, all monotonic attention (MA) heads should learn proper alignments because the next token is not generated until all heads detect the corresponding token boundaries. However, we found not all MA heads learn alignments with a naïve implementation. To encourage every head to learn alignments properly, we propose //HeadDrop// regularization by masking out a part of heads stochastically during training. Furthermore, we propose to prune redundant heads to improve consensus among heads for boundary detection and prevent delayed token generation caused by such heads. Chunkwise attention on each MA head is extended to the multihead counterpart. Finally, we propose //head-synchronous// beam search decoding to guarantee stable streaming inference.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shiliang Zhang|AUTHOR Shiliang Zhang]]^^1^^, [[Zhifu Gao|AUTHOR Zhifu Gao]]^^1^^, [[Haoneng Luo|AUTHOR Haoneng Luo]]^^2^^, [[Ming Lei|AUTHOR Ming Lei]]^^1^^, [[Jie Gao|AUTHOR Jie Gao]]^^1^^, [[Zhijie Yan|AUTHOR Zhijie Yan]]^^1^^, [[Lei Xie|AUTHOR Lei Xie]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, China; ^^2^^Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2142–2146&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, streaming end-to-end automatic speech recognition (E2E-ASR) has gained more and more attention. Many efforts have been paid to turn the non-streaming attention-based E2E-ASR system into streaming architecture. In this work, we propose a novel online E2E-ASR system by using //Streaming Chunk-Aware Multihead Attention// (SCAMA) and a latency control memory equipped self-attention network (LC-SAN-M). LC-SAN-M uses chunk-level input to control the latency of encoder. As to SCAMA, a jointly trained //predictor// is used to control the output of encoder when feeding to decoder, which enables decoder to generate output in streaming manner. Experimental results on the open 170-hour AISHELL-1 and an industrial-level 20000-hour Mandarin speech recognition tasks show that our approach can significantly outperform the MoChA-based baseline system under comparable setup. On the AISHELL-1 task, our proposed method achieves a character error rate (CER) of 7.39%, to the best of our knowledge, which is the best published performance for online ASR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]], [[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]], [[Sebastian Stüker|AUTHOR Sebastian Stüker]], [[Alex Waibel|AUTHOR Alex Waibel]]
</p><p class="cpabstractcardaffiliationlist">KIT, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2147–2151&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently sequence-to-sequence models have started to achieve state-of-the-art performance on standard speech recognition tasks when processing audio data in batch mode, i.e., the complete audio data is available when starting processing. However, when it comes to performing run-on recognition on an input stream of audio data while producing recognition results in real-time and with low word-based latency, these models face several challenges. For many techniques, the whole audio sequence to be decoded needs to be available at the start of the processing, e.g., for the attention mechanism or the bidirectional LSTM (BLSTM). In this paper, we propose several techniques to mitigate these problems. We introduce an additional loss function controlling the uncertainty of the attention mechanism, a modified beam search identifying partial, stable hypotheses, ways of working with BLSTM in the encoder, and the use of chunked BLSTM. Our experiments show that with the right combination of these techniques, it is possible to perform run-on speech recognition with low word-based latency without sacrificing in word error rate performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^1^^, [[Koji Okabe|AUTHOR Koji Okabe]]^^2^^, [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]]^^2^^, [[Qiongqiong Wang|AUTHOR Qiongqiong Wang]]^^2^^, [[Ling Guo|AUTHOR Ling Guo]]^^2^^, [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]^^2^^, [[Jiacen Zhang|AUTHOR Jiacen Zhang]]^^3^^, [[Keisuke Ishikawa|AUTHOR Keisuke Ishikawa]]^^3^^, [[Koichi Shinoda|AUTHOR Koichi Shinoda]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NEC, Japan; ^^2^^NEC, Japan; ^^3^^Tokyo Tech, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2227–2231&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The series of //speaker recognition evaluations// (SREs) organized by the National Institute of Standards and Technology (NIST) is widely accepted as the de facto benchmark for speaker recognition technology. This paper describes the NEC-TT speaker verification system developed for the recent SRE’19 CTS Challenge. Our system is based on an x-vector embedding front-end followed by a thin scoring back-end. We trained a very-deep neural network for x-vector extraction by incorporating residual connections, squeeze-and-excitation networks, and angular-margin softmax at the output layer. We enhanced the back-end with a tandem approach leveraging the benefit of supervised and unsupervised domain adaptation. We obtained over 30% relative reduction in error rate with each of these enhancements at the front-end and back-end, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruirui Li|AUTHOR Ruirui Li]]^^1^^, [[Jyun-Yu Jiang|AUTHOR Jyun-Yu Jiang]]^^2^^, [[Xian Wu|AUTHOR Xian Wu]]^^3^^, [[Chu-Cheng Hsieh|AUTHOR Chu-Cheng Hsieh]]^^1^^, [[Andreas Stolcke|AUTHOR Andreas Stolcke]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^University of California at Los Angeles, USA; ^^3^^University of Notre Dame, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2272–2276&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker identification based on voice input is a fundamental capability in speech processing enabling versatile downstream applications, such as personalization and authentication. With the advent of deep learning, most state-of-the-art methods apply machine learning techniques and derive acoustic embeddings from utterances with convolutional neural networks (CNNs) and recurrent neural networks (RNNs). This paper addresses two inherent limitations of current approaches. First, voice characteristics over long time spans might not be fully captured by CNNs and RNNs, as they are designed to focus on local feature extraction and adjacent dependencies modeling, respectively. Second, complex deep learning models can be fragile with regard to subtle but intentional changes in model inputs, also known as adversarial perturbations. To distill informative global acoustic embedding representations from utterances and be robust to adversarial perturbations, we propose a __S__elf-__A__ttentive __A__dversarial __S__peaker-__I__dentification method (//SAASI//). In experiments on the VCTK dataset, //SAASI// significantly outperforms four state-of-the-art baselines in identifying both known and new speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruyun Li|AUTHOR Ruyun Li]]^^1^^, [[Tianyu Liang|AUTHOR Tianyu Liang]]^^1^^, [[Dandan Song|AUTHOR Dandan Song]]^^2^^, [[Yi Liu|AUTHOR Yi Liu]]^^1^^, [[Yangcheng Wu|AUTHOR Yangcheng Wu]]^^1^^, [[Can Xu|AUTHOR Can Xu]]^^1^^, [[Peng Ouyang|AUTHOR Peng Ouyang]]^^2^^, [[Xianwei Zhang|AUTHOR Xianwei Zhang]]^^1^^, [[Xianhong Chen|AUTHOR Xianhong Chen]]^^1^^, [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]^^1^^, [[Shouyi Yin|AUTHOR Shouyi Yin]]^^1^^, [[Liang He|AUTHOR Liang He]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Tsing Micro, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2232–2236&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the system that THUEE submitted to NIST 2019 Speaker Recognition Evaluation CTS Challenge (SRE19). Similar to the previous SREs, domain mismatches, such as cross-lingual and cross-channel between the training sets and evaluation sets, remain the major challenges in this evaluation. To improve the robustness of our systems, we develop deeper and wider x-vector architectures. Besides, we use novel speaker discriminative embedding systems, hybrid multi-task learning architectures combined with phonetic information. To deal with domain mismatches, we follow a heuristic search scheme to select the best back-end strategy based on limited development corpus. An extended and factorized TDNN achieves the best single-system results on SRE18 DEV and SRE19 EVAL sets. The final system is a fusion of six subsystems, which yields EER 2.81% and minimum cost 0.262 on the SRE19 EVAL set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Grigory Antipov|AUTHOR Grigory Antipov]], [[Nicolas Gengembre|AUTHOR Nicolas Gengembre]], [[Olivier Le Blouch|AUTHOR Olivier Le Blouch]], [[Gaël Le Lan|AUTHOR Gaël Le Lan]]
</p><p class="cpabstractcardaffiliationlist">Orange Labs, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2237–2241&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Fusion of scores is a cornerstone of multimodal biometric systems composed of independent unimodal parts. In this work, we focus on quality-dependent fusion for speaker-face verification. To this end, we propose a universal model which can be trained for automatic quality assessment of both face and speaker modalities. This model estimates the quality of representations produced by unimodal systems which are then used to enhance the score-level fusion of speaker and face verification modules. We demonstrate the improvements brought by this quality-dependent fusion on the recent NIST SRE19 Audio-Visual Challenge dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruijie Tao|AUTHOR Ruijie Tao]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2242–2246&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio-visual speaker recognition is one of the tasks in the recent 2019 NIST speaker recognition evaluation (SRE). Studies in neuroscience and computer science all point to the fact that vision and auditory neural signals interact in the cognitive process. This motivated us to study a cross-modal network, namely voice-face discriminative network (VFNet) that establishes the general relation between human voice and face. Experiments show that VFNet provides additional speaker discriminative information. With VFNet, we achieve 16.54% equal error rate relative reduction over the score level fusion audio-visual baseline on evaluation set of 2019 NIST SRE.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suwon Shon|AUTHOR Suwon Shon]]^^1^^, [[James Glass|AUTHOR James Glass]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ASAPP, USA; ^^2^^MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2247–2251&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a multimodal association on a speaker verification system for fine-tuning using both voice and face. Inspired by neuroscientific findings, the proposed approach is to mimic the unimodal perception system benefits from the multisensory association of stimulus pairs. To verify this, we use the SRE18 evaluation protocol for experiments and use out-of-domain data, Voxceleb, for the proposed multimodal fine-tuning. Although the proposed approach relies on voice-face paired multimodal data during the training phase, the face is no more needed after training is done and only speech audio is used for the speaker verification system. In the experiments, we observed that the unimodal model, i.e. speaker verification model, benefits from the multimodal association of voice and face and generalized better than before by learning channel invariant speaker representation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2252–2256&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The information from different modalities usually compensates each other. In this paper, we use the audio and visual data in VoxCeleb dataset to do person verification. We explored different information fusion strategies and loss functions for the audio-visual person verification system at the embedding level. System performance is evaluated using the public trail lists on VoxCeleb1 dataset. Our best system using audio-visual knowledge at the embedding level achieves ''0.585%, 0.427% and 0.735% EER'' on the three official trial lists of VoxCeleb1, which are the best reported results on this dataset. Moreover, to imitate more complex test environment with one modality corrupted or missing, we construct a noisy evaluation set based on VoxCeleb1 dataset. We use a data augmentation strategy at the embedding level to help our audio-visual system to distinguish the noisy and the clean embedding. With such data augmented strategy, the proposed audio-visual person verification system is more robust on the noisy evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenyu Wang|AUTHOR Zhenyu Wang]], [[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2257–2261&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Forensic audio analysis for speaker verification offers unique challenges due to location/scenario uncertainty and diversity mismatch between reference and naturalistic field recordings. The lack of real naturalistic forensic audio corpora with ground-truth speaker identity represents a major challenge in this field. It is also difficult to directly employ small-scale domain-specific data to train complex neural network architectures due to domain mismatch and loss in performance. Alternatively, cross-domain speaker verification for multiple acoustic environments is a challenging task which could advance research in audio forensics. In this study, we introduce a CRSS-Forensics audio dataset collected in multiple acoustic environments. We pre-train a CNN-based network using the VoxCeleb data, followed by an approach which fine-tunes part of the high-level network layers with clean speech from CRSS-Forensics. Based on this fine-tuned model, we align domain-specific distributions in the embedding space with the discrepancy loss and maximum mean discrepancy (MMD). This maintains effective performance on the clean set, while simultaneously generalizes the model to other acoustic domains. From the results, we demonstrate that diverse acoustic environments affect the speaker verification performance, and that our proposed approach of cross-domain adaptation can significantly improve the results in this scenario.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mufan Sang|AUTHOR Mufan Sang]], [[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2262–2266&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In forensic applications, it is very common that only small naturalistic datasets consisting of short utterances in complex or unknown acoustic environments are available. In this study, we propose a pipeline solution to improve speaker verification on a small actual forensic field dataset. By leveraging large-scale out-of-domain datasets, a knowledge distillation based objective function is proposed for teacher-student learning, which is applied for short utterance forensic speaker verification. The objective function collectively considers speaker classification loss, Kullback-Leibler divergence, and similarity of embeddings. In order to advance the trained deep speaker embedding network to be robust for a small target dataset, we introduce a novel strategy to fine-tune the pre-trained student model towards a forensic target domain by utilizing the model as a finetuning start point and a reference in regularization. The proposed approaches are evaluated on the 1^^st^^ 48-UTD forensic corpus, a newly established naturalistic dataset of actual homicide investigations consisting of short utterances recorded in uncontrolled conditions. We show that the proposed objective function can efficiently improve the performance of teacher-student learning on short utterances and that our fine-tuning strategy outperforms the commonly used weight decay method by providing an explicit inductive bias towards the pre-trained model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anurag Chowdhury|AUTHOR Anurag Chowdhury]], [[Austin Cozzo|AUTHOR Austin Cozzo]], [[Arun Ross|AUTHOR Arun Ross]]
</p><p class="cpabstractcardaffiliationlist">Michigan State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2267–2271&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A text-independent speaker recognition system relies on successfully encoding speech factors such as vocal pitch, intensity, and timbre to achieve good performance. A majority of such systems are trained and evaluated using spoken voice or everyday conversational voice data. Spoken voice, however, exhibits a limited range of possible speaker dynamics, thus constraining the utility of the derived speaker recognition models. Singing voice, on the other hand, covers a broader range of vocal and ambient factors and can, therefore, be used to evaluate the robustness of a speaker recognition system. However, a majority of existing speaker recognition datasets only focus on the spoken voice. In comparison, there is a significant shortage of labeled singing voice data suitable for speaker recognition research. To address this issue, we assemble //JukeBox// — a speaker recognition dataset with multilingual singing voice audio annotated with singer identity, gender, and language labels. We use the current state-of-the-art methods to demonstrate the difficulty of performing speaker recognition on singing voice using models trained on spoken voice alone. We also evaluate the effect of gender and language on speaker recognition performance, both in spoken and singing voice data. The complete //JukeBox// dataset can be accessed at http://iprobe.cse.msu.edu/datasets/jukebox.html</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oleg Rybakov|AUTHOR Oleg Rybakov]], [[Natasha Kononenko|AUTHOR Natasha Kononenko]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Mirkó Visontai|AUTHOR Mirkó Visontai]], [[Stella Laurenzo|AUTHOR Stella Laurenzo]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2277–2281&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we explore the latency and accuracy of keyword spotting (KWS) models in streaming and non-streaming modes on mobile phones. NN model conversion from non-streaming mode (model receives the whole input sequence and then returns the classification result) to streaming mode (model receives portion of the input sequence and classifies it incrementally) may require manual model rewriting. We address this by designing a Tensorflow/Keras based library which allows automatic conversion of non-streaming models to streaming ones with minimum effort. With this library we benchmark multiple KWS models in both streaming and non-streaming modes on mobile phones and demonstrate different tradeoffs between latency and accuracy. We also explore novel KWS models with multi-head attention which reduce the classification error over the state-of-art by 10% on Google speech commands data sets V2. The streaming library with all experiments is open-sourced.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongyi Liu|AUTHOR Hongyi Liu]], [[Apurva Abhyankar|AUTHOR Apurva Abhyankar]], [[Yuriy Mishchenko|AUTHOR Yuriy Mishchenko]], [[Thibaud Sénéchal|AUTHOR Thibaud Sénéchal]], [[Gengshen Fu|AUTHOR Gengshen Fu]], [[Brian Kulis|AUTHOR Brian Kulis]], [[Noah D. Stein|AUTHOR Noah D. Stein]], [[Anish Shah|AUTHOR Anish Shah]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2282–2286&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As a crucial part of Alexa products, our on-device keyword spotting system detects the wakeword in conversation and initiates subsequent user-device interactions. Convolutional neural networks (CNNs) have been widely used to model the relationship between time and frequency in the audio spectrum. However, it is not obvious how to appropriately leverage the rich descriptive information from device state metadata (such as player state, device type, volume, etc) in a CNN architecture. In this paper, we propose to use metadata information as an additional input feature to improve the performance of a single CNN keyword -spotting model under different conditions. We design a new network architecture for metadata-aware end-to-end keyword spotting which learns to convert the categorical metadata to a fixed length embedding, and then uses the embedding to: 1) modulate convolutional feature maps via conditional batch normalization, and 2) contribute to the fully connected layer via feature concatenation. The experiment shows that the proposed architecture is able to learn the meta-specific characteristics from combined datasets, and the best candidate achieves an average relative false reject rate (FRR) improvement of 14.63% at the same false accept rate (FAR) compared with CNN that does not use device state metadata.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yehao Kong|AUTHOR Yehao Kong]], [[Jiliang Zhang|AUTHOR Jiliang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Peng Cheng Laboratory, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2287–2291&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio is an important medium in people’s daily life, hidden information can be embedded into audio for covert communication. Current audio information hiding techniques can be roughly classified into time domain-based and transform domain-based techniques. Time domain-based techniques have large hiding capacity but low imperceptibility. Transform domain-based techniques have better imperceptibility, but the hiding capacity is poor. This paper proposes a new audio information hiding technique which shows high hiding capacity and good imperceptibility. The proposed audio information hiding method takes the original audio signal as input and obtains the audio signal embedded with hidden information (called stego audio) through the training of our private DNN-based automatic speech recognition (ASR) model. The experimental results show that the proposed audio information hiding technique has a high hiding capacity of 48 cps with good imperceptibility and high security.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinsheng Wang|AUTHOR Xinsheng Wang]]^^1^^, [[Tingting Qiao|AUTHOR Tingting Qiao]]^^2^^, [[Jihua Zhu|AUTHOR Jihua Zhu]]^^1^^, [[Alan Hanjalic|AUTHOR Alan Hanjalic]]^^2^^, [[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^XJTU, China; ^^2^^Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2292–2296&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An estimated half of the world’s languages do not have a written form, making it impossible for these languages to benefit from any existing text-based technologies. In this paper, a speech-to-image generation (S2IG) framework is proposed which translates speech descriptions to photo-realistic images without using any text information, thus allowing unwritten languages to potentially benefit from this technology. The proposed S2IG framework, named S2IGAN, consists of a speech embedding network (SEN) and a relation-supervised densely-stacked generative model (RDG). SEN learns the speech embedding with the supervision of the corresponding visual information. Conditioned on the speech embedding produced by SEN, the proposed RDG synthesizes images that are semantically consistent with the corresponding speech descriptions. Extensive experiments on datasets CUB and Oxford-102 demonstrate the effectiveness of the proposed S2IGAN on synthesizing high-quality and semantically-consistent images from the speech signal, yielding a good performance and a solid baseline for the S2IG task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juan Zuluaga-Gomez|AUTHOR Juan Zuluaga-Gomez]]^^1^^, [[Petr Motlicek|AUTHOR Petr Motlicek]]^^1^^, [[Qingran Zhan|AUTHOR Qingran Zhan]]^^1^^, [[Karel Veselý|AUTHOR Karel Veselý]]^^2^^, [[Rudolf Braun|AUTHOR Rudolf Braun]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Idiap Research Institute, Switzerland; ^^2^^Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2297–2301&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Advances in Automatic Speech Recognition (ASR) over the last decade opened new areas of speech-based automation such as in Air-Traffic Control (ATC) environments. Currently, voice communication and data links communications are the only way of contact between pilots and Air-Traffic Controllers (ATCo), where the former is the most widely used and the latter is a non-spoken method mandatory for oceanic messages and limited for some domestic issues. ASR systems on ATCo environments inherit increasing complexity due to accents from non-English speakers, cockpit noise, speaker-dependent biases and small in-domain ATC databases for training. Hereby, we introduce CleanSky EC-H2020 ATCO2, a project that aims to develop an ASR-based platform to collect, organize and automatically pre-process ATCo speech-data from air space. This paper conveys an exploratory benchmark of several state-of-the-art ASR models trained on more than 170 hours of ATCo speech-data. We demonstrate that the cross-accent flaws due to speakers’ accents are minimized due to the amount of data, making the system feasible for ATC environments. The developed ASR system achieves an averaged word error rate (WER) of 7.75% across four databases. An additional 35% relative improvement in WER is achieved on one test set when training a TDNNF system with byte-pair encoding.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prithvi R.R. Gudepu|AUTHOR Prithvi R.R. Gudepu]], [[Gowtham P. Vadisetti|AUTHOR Gowtham P. Vadisetti]], [[Abhishek Niranjan|AUTHOR Abhishek Niranjan]], [[Kinnera Saranu|AUTHOR Kinnera Saranu]], [[Raghava Sarma|AUTHOR Raghava Sarma]], [[M. Ali Basha Shaik|AUTHOR M. Ali Basha Shaik]], [[Periyasamy Paramasivam|AUTHOR Periyasamy Paramasivam]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2302–2306&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) systems are known to perform poorly under whispered speech conditions. One of the primary reasons is the lack of large annotated whisper corpora. To address this challenge, we propose data augmentation with synthetic whisper corpus generated from normal speech using Cycle-Consistent Generative Adversarial Network (CycleGAN). We train CycleGAN model with a limited corpus of parallel whispered and normal speech, aligned using Dynamic Time Warping (DTW). The model learns frame-wise mapping from feature vectors of normal speech to those of whisper. We then augment ASR systems with the generated synthetic whisper corpus. In this paper, we validate our proposed approach using state-of-the-art end-to-end (E2E) and hybrid ASR systems trained on publicly available Librispeech, wTIMIT and internally recorded far-field corpora. We achieved 23% relative reduction in word error rate (WER) compared to baseline on whisper test sets. In addition, we also achieved WER reductions on Librispeech and far-field test sets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ramit Sawhney|AUTHOR Ramit Sawhney]]^^1^^, [[Arshiya Aggarwal|AUTHOR Arshiya Aggarwal]]^^2^^, [[Piyush Khanna|AUTHOR Piyush Khanna]]^^3^^, [[Puneet Mathur|AUTHOR Puneet Mathur]]^^4^^, [[Taru Jain|AUTHOR Taru Jain]]^^3^^, [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NSUT, India; ^^2^^Adobe, India; ^^3^^IIIT Delhi, India; ^^4^^University of Maryland at College Park, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2307–2311&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Stock volatility is a degree of deviations from expected returns, and thus, estimates risk, which is crucial for investment decision making. Volatility forecasting is complex given the stochastic nature of market microstructure, where we use frenzied data over various modalities to make temporally dependent forecasts. Transcripts of earnings calls of companies are well studied for risk modeling as they offer unique investment insight into stock performance. Anecdotal evidence shows company CEO’s vocal cues could be indicative of the stock performance. The recently developing body of work on analyzing earnings calls treat stocks as independent of each other, thus not using rich relations between stocks. To this end, we introduce the first neural model that employs cross inter-modal attention for deep verbal-vocal coherence and accounts for stock interdependence through multi-layer network embeddings. We show that our approach outperforms state-of-the-art methods by augmenting speech features with correlations from text and stock network modalities. Lastly, we analyse the components and financial implications of our method through an ablation and case study.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huili Chen|AUTHOR Huili Chen]]^^1^^, [[Bita Darvish|AUTHOR Bita Darvish]]^^2^^, [[Farinaz Koushanfar|AUTHOR Farinaz Koushanfar]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at San Diego, USA; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2312–2316&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic Speech Recognition (ASR) systems are widely deployed in various applications due to their superior performance. However, obtaining a highly accurate ASR model is non-trivial since it requires the availability of a massive amount of proprietary training data and enormous computational resources. As such, pre-trained ASR models shall be considered as the intellectual property (IP) of the model designer and protected against copyright infringement attacks. In this paper, we propose SpecMark, the first spectral watermarking framework that seamlessly embeds a //watermark// (WM) in the spectrum of the ASR model for //ownership proof//. SpecMark identifies the significant frequency components of the model parameters and encodes the owner’s WM in the corresponding spectrum region before sharing the model with end-users. The model builder can later extract the spectral WM to verify his ownership of the marked ASR system. We evaluate SpecMark’s performance using DeepSpeech model with three different speech datasets. Empirical results corroborate that SpecMark incurs negligible overhead and preserves the recognition accuracy of the original system. Furthermore, SpecMark sustains diverse model modifications, including parameter pruning and transfer learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Justin van der Hout|AUTHOR Justin van der Hout]]^^1^^, [[Zoltán D’Haese|AUTHOR Zoltán D’Haese]]^^2^^, [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]^^3^^, [[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universiteit Delft, The Netherlands; ^^2^^KU Leuven, Belgium; ^^3^^University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2317–2321&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Image2Speech is the relatively new task of generating a spoken description of an image. This paper presents an investigation into the evaluation of this task. For this, first an Image2Speech system was implemented which generates image captions consisting of phoneme sequences. This system outperformed the original Image2Speech system on the Flickr8k corpus. Subsequently, these phoneme captions were converted into sentences of words. The captions were rated by human evaluators for their goodness of describing the image. Finally, several objective metric scores of the results were correlated with these human ratings. Although BLEU4 does not perfectly correlate with human ratings, it obtained the highest correlation among the investigated metrics, and is the best currently existing metric for the Image2Speech task. Current metrics are limited by the fact that they assume their input to be words. A more appropriate metric for the Image2Speech task should assume its input to be parts of words, i.e. phonemes, instead.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei-Cheng Lin|AUTHOR Wei-Cheng Lin]], [[Carlos Busso|AUTHOR Carlos Busso]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2322–2326&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>//Speech emotion recognition// (SER) plays an important role in multiple fields such as healthcare, //human-computer interaction// (HCI), and security and defense. Emotional labels are often annotated at the sentence-level (i.e., one label per sentence), resulting in a sequence-to-one recognition problem. Traditionally, studies have relied on statistical descriptions, which are computed over time from //low level descriptors// (LLDs), creating a fixed dimension sentence-level feature representation regardless of the duration of the sentence. However sentence-level features lack temporal information, which limits the performance of SER systems. Recently, new deep learning architectures have been proposed to model temporal data. An important question is how to extract emotion-relevant features with temporal information. This study proposes a novel data processing approach that extracts a fixed number of small chunks over sentences of different durations by changing the overlap between these chunks. The approach is flexible, providing an ideal framework to combine gated network or attention mechanisms with //long short-term memory// (LSTM) networks. Our experimental results based on the MSP-Podcast dataset demonstrate that the proposed method not only significantly improves recognition accuracy over alternative temporal-based models relying on LSTM, but also leads to computational efficiency.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siddique Latif|AUTHOR Siddique Latif]]^^1^^, [[Rajib Rana|AUTHOR Rajib Rana]]^^1^^, [[Sara Khalifa|AUTHOR Sara Khalifa]]^^2^^, [[Raja Jurdak|AUTHOR Raja Jurdak]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Queensland, Australia; ^^2^^CSIRO, Australia; ^^3^^Queensland University of Technology, Australia; ^^4^^Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2327–2331&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition systems (SER) can achieve high accuracy when the training and test data are identically distributed, but this assumption is frequently violated in practice and the performance of SER systems plummet against unforeseen data shifts. The design of robust models for accurate SER is challenging, which limits its use in practical applications. In this paper we propose a deeper neural network architecture wherein we fuse Dense Convolutional Network (DenseNet), Long short-term memory (LSTM) and Highway Network to learn powerful discriminative features which are robust to noise. We also propose data augmentation with our network architecture to further improve the robustness. We comprehensively evaluate the architecture coupled with data augmentation against (1) noise, (2) adversarial attacks and (3) cross-corpus settings. Our evaluations on the widely used IEMOCAP and MSP-IMPROV datasets show promising results when compared with existing studies and state-of-the-art models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuya Fujioka|AUTHOR Takuya Fujioka]], [[Takeshi Homma|AUTHOR Takeshi Homma]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]
</p><p class="cpabstractcardaffiliationlist">Hitachi, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2332–2336&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion labels in emotion recognition corpora are highly noisy and ambiguous, due to the annotators’ subjective perception of emotions. Such ambiguity may introduce errors in automatic classification and affect the overall performance. We therefore propose a dynamic label correction and sample contribution weight estimation model. Our model is based on a standard BLSTM model with attention with two extra parameters. The first learns a new corrected label distribution and aims to fix the inaccurate labels in the dataset. The other estimates the contribution of each sample to the training process and aims to ignore the ambiguous and noisy samples while giving higher weights to the clear ones. We train our model through an alternating optimization method, where in the first epoch we update the neural network parameters, and in the second we keep them fixed to update the label correction and sample importance parameters. When training and evaluating our model on the IEMOCAP dataset, we obtained a weighted accuracy (WA) and unweighted accuracy (UA) of 65.9% and 61.4%, respectively. This yielded an absolute improvement of 2.3% and 1.9%, respectively, compared to a BLSTM with attention baseline, trained on the corpus gold labels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiaxing Liu|AUTHOR Jiaxing Liu]], [[Zhilei Liu|AUTHOR Zhilei Liu]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Yuan Gao|AUTHOR Yuan Gao]], [[Lili Guo|AUTHOR Lili Guo]], [[Jianwu Dang|AUTHOR Jianwu Dang]]
</p><p class="cpabstractcardaffiliationlist">Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2337–2341&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As the fundamental research of affective computing, speech emotion recognition (SER) has gained a lot of attention. Unlike with common deep learning tasks, SER was restricted by the scarcity of emotional speech datasets. In this paper, the vector quantization variational automatic encoder (VQ-VAE) was introduced and trained by massive unlabeled data in an unsupervised manner. Benefiting from the excellent invariant distribution encoding capability and discrete embedding space of VQ-VAE, the pre-trained VQ-VAE could learn latent representation from labeled data. The extracted latent representation could serve as the additional source data to make data abundantly available. While solving data lacking issue, sequence information modeling was also taken into account which was considered useful for SER. The proposed sequence model, temporal attention convolutional network (TACN) was simple yet good at learning contextual information from limited data which was not friendly to complicated structures of recurrent neural network (RNN) based sequence models. To validate the effectiveness of the latent representation, t-distributed stochastic neighbor embedding (t-SNE) was introduced to analyze the visualizations. To verify the performance of the proposed TACN, quantitative classification results of all commonly used sequence models were provided. Our proposed model achieved state-of-the-art performance on IEMOCAP.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhi Zhu|AUTHOR Zhi Zhu]], [[Yoshinao Sato|AUTHOR Yoshinao Sato]]
</p><p class="cpabstractcardaffiliationlist">Fairy Devices, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2342–2346&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Research on affective computing has achieved remarkable success with the development of deep learning. One of the major difficulties in emotion recognition is inconsistent criteria for emotion categorization between multiple corpora. Most previous studies using multiple corpora discard or merge a part of their emotion classes. This prescription causes catastrophic information loss with respect to emotion categorization. Furthermore, the influences of corpus-specific factors other than emotions, such as languages, speech registers, and recording environments, should be eliminated to fully utilize multiple corpora. In this paper, we address the challenge of reconciling multiple emotion corpora by learning a corpus-independent emotion encoding disentangled from all the remaining factors without causing catastrophic information loss. For this purpose, we propose a model that consists of a shared emotion encoder, multiple emotion classifiers, and an adversarial corpus discriminator. This model is trained with multi-task learning harnessed by adversarial learning. We conducted speech emotion classification experiments with our method on two corpora, namely, EmoDB and CREMA-D. The results demonstrate that our method achieves higher accuracies than mono-corpus models. In addition, it is indicated that the proposed method suppresses corpus-dependent factors other than emotions in the embedding space.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Lian|AUTHOR Zheng Lian]]^^1^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^, [[Bin Liu|AUTHOR Bin Liu]]^^1^^, [[Jian Huang|AUTHOR Jian Huang]]^^1^^, [[Zhanlei Yang|AUTHOR Zhanlei Yang]]^^2^^, [[Rongjun Li|AUTHOR Rongjun Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2347–2351&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Different from the emotion estimation in individual utterances, context-sensitive and speaker-sensitive dependences are vitally pivotal for conversational emotion analysis. In this paper, we propose a graph-based neural network to model these dependences. Specifically, our approach represents each utterance and each speaker as a node. To bridge the context-sensitive dependence, each utterance node has edges between immediate utterances from the same conversation. Meanwhile, the directed edges between each utterance node and its speaker node bridge the speaker-sensitive dependence. To verify the effectiveness of our strategy, we conduct experiments on the MELD dataset. Experimental results demonstrate that our method shows an absolute improvement of 1%~2% over state-of-the-art strategies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2352–2356&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human emotional speech is, by its very nature, a variant signal. This results in dynamics intrinsic to automatic emotion classification based on speech. In this work, we explore a spectral decomposition method stemming from fluid-dynamics, known as Dynamic Mode Decomposition (DMD), to computationally represent and analyze the global utterance-level dynamics of emotional speech. Specifically, segment-level emotion-specific representations are first learned through an Emotion Distillation process. This forms a multi-dimensional signal of emotion flow for each utterance, called Emotion Profiles (EPs). The DMD algorithm is then applied to the resultant EPs to capture the eigenfrequencies, and hence the fundamental transition dynamics of the emotion flow. Evaluation experiments using the proposed approach, which we call EigenEmo, show promising results. Moreover, due to the positive combination of their complementary properties, concatenating the utterance representations generated by EigenEmo with simple EPs averaging yields noticeable gains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuiyang Mao|AUTHOR Shuiyang Mao]]^^1^^, [[P.C. Ching|AUTHOR P.C. Ching]]^^1^^, [[C.-C. Jay Kuo|AUTHOR C.-C. Jay Kuo]]^^2^^, [[Tan Lee|AUTHOR Tan Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2357–2361&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Categorical speech emotion recognition is typically performed as a sequence-to-label problem, i. e., to determine the discrete emotion label of the input utterance as a whole. One of the main challenges in practice is that most of the existing emotion corpora do not give ground truth labels for each segment; instead, we only have labels for whole utterances. To extract segment-level emotional information from such weakly labeled emotion corpora, we propose using multiple instance learning (MIL) to learn segment embeddings in a weakly supervised manner. Also, for a sufficiently long utterance, not all of the segments contain relevant emotional information. In this regard, three attention-based neural network models are then applied to the learned segment embeddings to attend the most salient part of a speech utterance. Experiments on the CASIA corpus and the IEMOCAP database show better or highly competitive results than other state-of-the-art approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sara Akbarzadeh|AUTHOR Sara Akbarzadeh]]^^1^^, [[Sungmin Lee|AUTHOR Sungmin Lee]]^^2^^, [[Chin-Tuan Tan|AUTHOR Chin-Tuan Tan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^Tongmyong University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2497–2501&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigated the effect of sound level on the spatial selective auditory attention of Normal Hearing (NH) and Cochlear Implant (CI) listeners behaviorally and electrophysiologically. Three spatially separated speech streams consisting of one target and two maskers were presented to the subjects. While keeping the same target to masker ratio, the target stimuli were presented at three different sound levels. In the behavioral test, subjects were instructed to attend the target speech, and speech perception score was calculated based on correctly recognized words in percentage. In the electrophysiological test, electroencephalography (EEG) signals were recorded while the subjects were listening for target speech in the presence of two maskers. The attended speech envelope was reconstructed from EEG using a linear regression model. The spatial auditory attention was detected through comparison of the reconstructed speech envelope with the original envelopes associated with the three speech streams presented. Outcome of both behavioral and electrophysiological experiments showed that an increase in the sound level decreases the spatial speech recognition performance of CI listeners, but not NH listeners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhishek Shivkumar|AUTHOR Abhishek Shivkumar]], [[Jack Weston|AUTHOR Jack Weston]], [[Raphael Lenain|AUTHOR Raphael Lenain]], [[Emil Fristed|AUTHOR Emil Fristed]]
</p><p class="cpabstractcardaffiliationlist">Novoic, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2542–2546&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce BlaBla, an open-source Python library for extracting linguistic features with proven clinical relevance to neurological and psychiatric diseases across many languages. BlaBla is a unifying framework for accelerating and simplifying clinical linguistic research. The library is built on state-of-the-art NLP frameworks and supports multithreaded/GPU-enabled feature extraction via both native Python calls and a command line interface. We describe BlaBla’s architecture and clinical validation of its features across 12 diseases. We further demonstrate the application of BlaBla to a task visualizing and classifying language disorders in three languages on real clinical data from the AphasiaBank dataset. We make the codebase freely available to researchers with the hope of providing a consistent, well-validated foundation for the next generation of clinical linguistic research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yangyang Wan|AUTHOR Yangyang Wan]]^^1^^, [[Huali Zhou|AUTHOR Huali Zhou]]^^2^^, [[Qinglin Meng|AUTHOR Qinglin Meng]]^^2^^, [[Nengheng Zheng|AUTHOR Nengheng Zheng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shenzhen University, China; ^^2^^SCUT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2502–2506&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Normal hearing listeners mainly use interaural time differences (ITDs) and interaural level differences (ILDs) to localize sound sources in the horizontal plane. Listeners with bilateral cochlear implants (CIs), however, have poor sensitivity to ITDs which significantly limits their spatial hearing capabilities. Most CI signal processing strategies, such as the continuous interleaved sampling (CIS) strategy, are temporal-envelope-based, and the temporal fine structure (TFS), which contains useful cues for ITDs, is discarded. Recently, a temporal limits encoder (TLE) CI strategy was proposed to implicitly introduce the TFS while preserving the temporal envelope. It has demonstrated benefits in unilateral CI simulations in tasks including speech-in-noise understanding and pitch perception. This study investigates whether the ITD cues could be enhanced by a bilateral TLE strategy. Identification of five ITDs respectively associated with five sound source directions was tested with vocoded speech stimuli to compare the performance of the bilateral TLE and CIS strategies. Results show that the bilateral TLE has better overall performance than the bilateral CIS. This finding suggests that the bilateral TLE is promising in providing enhanced ITD cues for bilateral CI users.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Toshio Irino|AUTHOR Toshio Irino]], [[Soichi Higashiyama|AUTHOR Soichi Higashiyama]], [[Hanako Yoshigi|AUTHOR Hanako Yoshigi]]
</p><p class="cpabstractcardaffiliationlist">Wakayama University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2507–2511&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We performed two experiments to ascertain whether vocal self-training improves speech clarity, particularly when the feedback speech is degraded by a hearing impairment simulator. Speech sounds before and after the training were recorded under noisy and quiet conditions and their speech clarity was evaluated by subjective listening tests using Scheffe’s paired comparison. We also analyzed the auditory modulation features to derive an index to explain the subjective clarity scores. The auditory modulation index highly correlated with subjective scores and seems a good candidate for predicting speech clarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]], [[Donald S. Williamson|AUTHOR Donald S. Williamson]], [[Yi Shen|AUTHOR Yi Shen]]
</p><p class="cpabstractcardaffiliationlist">Indiana University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2512–2516&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phase serves as a critical component of speech that influences the quality and intelligibility. Current speech enhancement algorithms are beginning to address phase distortions, but the algorithms focus on normal-hearing (NH) listeners. It is not clear whether phase enhancement is beneficial for hearing-impaired (HI) listeners. We investigated the influence of phase distortion on speech quality through a listening study, in which NH and HI listeners provided speech-quality ratings using the MUSHRA procedure. In one set of conditions, the speech was mixed with babble noise at 4 different signal-to-noise ratios (SNRs) from -5 to 10 dB. In another set of conditions, the SNR was fixed at 10 dB and the noisy speech was presented in a simulated reverberant room with T60s ranging from 100 to 1000 ms. The speech level was kept at 65 dB SPL for NH listeners and amplification was applied for HI listeners to ensure audibility. Ideal ratio masking (IRM) was used to simulate speech enhancement. Two objective metrics (i.e., PESQ and HASQI) were utilized to compare subjective and objective ratings. Results indicate that phase distortion has a negative impact on perceived quality for both groups and PESQ is more closely correlated with human ratings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhuo Zhang|AUTHOR Zhuo Zhang]], [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Shuang Wu|AUTHOR Shuang Wu]], [[Di Zhou|AUTHOR Di Zhou]], [[Longbiao Wang|AUTHOR Longbiao Wang]]
</p><p class="cpabstractcardaffiliationlist">Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2517–2521&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A healthy person can attend to one speech in a multi-speaker scenario, however, this ability is not available to some people suffering from hearing impairments. Therefore, research on auditory attention detection based on electroencephalography (EEG) is a possible way to help hearing-impaired listeners detect the focused speech. Many previous studies used linear models or deep learning to decode the attended speech, but the cross-subject decoding accuracy is low, especially within a short time duration. In this study, we propose a multi-task learning model based on convolutional neural networks (CNN) to simultaneously perform attention decoding and reconstruct the attended temporal amplitude envelopes (TAEs) in a 2s time condition. The experimental results show that, compared to the traditional linear method, both the subject-specific and cross-subject decoding performance showed great improvement. Particularly, the cross-subject decoding accuracy was improved from 56% to 82% in 2s condition in the dichotic listening experiment. Furthermore, it was found that the frontal and temporal regions of the brain were more important for the detection of auditory attention by analyzing the channel contribution map. In summary, the proposed method is promising for nerve-steered hearing aids which can help hearing-impaired listeners to make faster and accurate attention detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sondes Abderrazek|AUTHOR Sondes Abderrazek]]^^1^^, [[Corinne Fredouille|AUTHOR Corinne Fredouille]]^^1^^, [[Alain Ghio|AUTHOR Alain Ghio]]^^2^^, [[Muriel Lalain|AUTHOR Muriel Lalain]]^^2^^, [[Christine Meunier|AUTHOR Christine Meunier]]^^2^^, [[Virginie Woisard|AUTHOR Virginie Woisard]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^LPL (UMR 7309), France; ^^3^^UT2J (EA 4156), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2522–2526&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Perceptual measurement is still the most common method for assessing disordered speech in clinical practice. The subjectivity of such a measure, strongly due to human nature, but also to its lack of interpretation with regard to local alterations in speech units, strongly motivates a sophisticated tool for objective evaluation. Of interest is the increasing performance of Deep Neural Networks in speech applications, but more importantly the fact that they are no longer considered as black boxes. The work carried out here is the first step in a long-term research project, which aims to determine the linguistic units that contribute most to the maintenance or loss of the intelligibility in speech disorders. In this context, we study a CNN trained on normal speech for a classification task of phones and tested on pathological speech. The aim of this first study is to analyze the response of the CNN model to disordered speech in order to study later its effectiveness in providing relevant knowledge in terms of speech severity or loss of intelligibility. Compared to perceptual severity and intelligibility measures, the results revealed a very strong correlation between these metrics and our classifier performance scores, which is very promising for future work.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bahman Mirheidari|AUTHOR Bahman Mirheidari]]^^1^^, [[Daniel Blackburn|AUTHOR Daniel Blackburn]]^^1^^, [[Ronan O’Malley|AUTHOR Ronan O’Malley]]^^1^^, [[Annalena Venneri|AUTHOR Annalena Venneri]]^^1^^, [[Traci Walker|AUTHOR Traci Walker]]^^2^^, [[Markus Reuber|AUTHOR Markus Reuber]]^^2^^, [[Heidi Christensen|AUTHOR Heidi Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Sheffield, UK; ^^2^^Royal Hallamshire Hospital, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2527–2531&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Early detection of cognitive impairment is of great clinical importance. Current cognitive tests assess language and speech abilities. Recently, we have developed a fully automated system to detect cognitive impairment from the analysis of conversations between a person and an intelligent virtual agent (IVA). Promising results have been achieved, however more data than is typically available in the medical domain is required to train more complex classifiers. Data augmentation using generative models has been demonstrated to be an effective approach. In this paper, we use a variational autoencoder to augment data at the feature-level as opposed to the speech signal-level. We investigate whether this suits some feature types (e.g., acoustic, linguistic) better than others. We evaluate the approach on IVA recordings of people with four different cognitive impairment conditions. F-scores of a four-way logistic regression (LR) classifier are improved for certain feature types. For a deep neural network (DNN) classifier, the improvement is seen for almost all feature types. The F-score of the LR classifier on the combined features increases from 55% to 60%, and for the DNN classifier from 49% to 62%. Further improvements are gained by feature selection: 88% and 80% F-scores for LR and DNN classifiers respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meredith Moore|AUTHOR Meredith Moore]]^^1^^, [[Piyush Papreja|AUTHOR Piyush Papreja]]^^2^^, [[Michael Saxon|AUTHOR Michael Saxon]]^^3^^, [[Visar Berisha|AUTHOR Visar Berisha]]^^2^^, [[Sethuraman Panchanathan|AUTHOR Sethuraman Panchanathan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Drake University, USA; ^^2^^Arizona State University, USA; ^^3^^University of California at Santa Barbara, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2532–2536&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To facilitate more accessible spoken language technologies and advance the study of dysphonic speech this paper presents UncommonVoice, a freely-available, crowd-sourced speech corpus consisting of 8.5 hours of speech from 57 individuals, 48 of whom have spasmodic dysphonia. The speech material consists of non-words (prolonged vowels, and the prompt for diadochokinetic rate), sentences (randomly selected from TIMIT prompts and the CAPE-V intelligibility analysis), and spontaneous image descriptions. The data was recorded in a crowdsourced manner using a web-based application. This dataset is a fundamental resource for the development of voice-assistive technologies for individuals with dysphonia as well as the enhancement of the accessibility of voice-based technologies (automatic speech recognition, virtual assistants, etc). Research on articulation differences as well as how best to model and represent dysphonic speech will greatly benefit from a free and publicly available dataset of dysphonic speech. The dataset will be made freely and publicly available at www.uncommonvoice.org. In the following sections, we detail the data collection process as well as provide an initial analysis of the speech corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Purva Barche|AUTHOR Purva Barche]], [[Krishna Gurugubelli|AUTHOR Krishna Gurugubelli]], [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]
</p><p class="cpabstractcardaffiliationlist">IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2537–2541&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic detection and assessment of voice disorders is important in diagnosis and treatment planning of voice disorders. This work proposes an approach for automatic detection and assessment of voice disorders from a clinical perspective. To accomplish this, a multi-level classification approach was explored in which four binary classifiers were used for the assessment of voice disorders. The binary classifiers were trained using support vector machines with excitation source features, vocal-tract system features, and state-of-art OpenSMILE features. In this study source features namely, glottal parameters obtained from glottal flow waveform, perturbation measures obtained from epoch locations, and cepstral features obtained from linear prediction residual and zero frequency filtered signal were explored. The present study used the Saarbucken voice disorders database to evaluate the performance of proposed approach. The OpenSMILE features namely ComParE and eGEMAPS feature sets shown better performance in terms of classification accuracies of 82.8% and 76%, respectively for voice disorder detection. The combination of excitation source features with baseline feature sets further improved the performance of detection and assessment systems, that highlight the complimentary nature of exciting source features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]]^^1^^, [[Negar Olfati|AUTHOR Negar Olfati]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^2^^, [[Giampiero Salvi|AUTHOR Giampiero Salvi]]^^1^^, [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTNU, Norway; ^^2^^Università di Enna “Kore”, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2877–2881&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Articulatory information has been argued to be useful for several speech tasks. However, in most practical scenarios this information is not readily available. We propose a novel transfer learning framework to obtain reliable articulatory information in such cases. We demonstrate its reliability both in terms of estimating parameters of speech production and its ability to enhance the accuracy of an end-to-end phone recognizer. Articulatory information is estimated from speaker independent phonemic features, using a small speech corpus, with electro-magnetic articulography (EMA) measurements. Next, we employ a teacher-student model to learn estimation of articulatory features from acoustic features for the targeted phone recognition task. Phone recognition experiments, demonstrate that the proposed transfer learning approach outperforms the baseline transfer learning system acquired directly from an acoustic-to-articulatory (AAI) model. The articulatory features estimated by the proposed method, in conjunction with acoustic features, improved the phone error rate (PER) by 6.7% and 6% on the TIMIT core test and development sets, respectively, compared to standalone static acoustic features. Interestingly, this improvement is slightly higher than what is obtained by static+dynamic acoustic features, but with a significantly less. Adding articulatory features on top of static+dynamic acoustic features yields a small but positive PER improvement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abinay Reddy Naini|AUTHOR Abinay Reddy Naini]]^^1^^, [[Malla Satyapriya|AUTHOR Malla Satyapriya]]^^2^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^RGUKT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2922–2926&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we proposed a method to detect the whispered speech region in a noisy audio file called whisper activity detection (WAD). Due to the lack of pitch and noisy nature of whispered speech, it makes WAD a way more challenging task than standard voice activity detection (VAD). In this work, we proposed a Long-short term memory (LSTM) based whisper activity detection algorithm. However, this LSTM network is trained by keeping it as an attention pooling layer to a Convolutional neural network (CNN), which is trained for a speaker identification task. WAD experiments with 186 speakers, with eight noise types in seven different signal-to-noise ratio (SNR) conditions, show that the proposed method performs better than the best baseline scheme in most of the conditions. Particularly in the case of unknown noises and environmental conditions, the proposed WAD performs significantly better than the best baseline scheme. Another key advantage of the proposed WAD method is that it requires only a small part of the training data with annotation to fine-tune the post-processing parameters, unlike the existing baseline schemes requiring full training data annotated with the whispered speech regions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^2^^, [[Giampiero Salvi|AUTHOR Giampiero Salvi]]^^1^^, [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTNU, Norway; ^^2^^Università di Enna “Kore”, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2882–2886&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a new acoustic-to-articulatory inversion (AAI) sequence-to-sequence neural architecture, where spectral sub-bands are independently processed in time by 1-dimensional (1-D) convolutional filters of different sizes. The learned features maps are then combined and processed by a recurrent block with bi-directional long short-term memory (BLSTM) gates for preserving the smoothly varying nature of the articulatory trajectories. Our experimental evidence shows that, on a speaker dependent AAI task, in spite of the reduced number of parameters, our model demonstrates better root mean squared error (RMSE) and Pearson’s correlation coefficient (PCC) than a both a BLSTM model and an FC-BLSTM model where the first stages are fully connected layers. In particular, the average RMSE goes from 1.401 when feeding the filterbank features directly into the BLSTM, to 1.328 with the FC-BLSTM model, and to 1.216 with the proposed method. Similarly, the average PCC increases from 0.859 to 0.877, and 0.895, respectively. On a speaker independent AAI task, we show that our convolutional features outperform the original filterbank features, and can be combined with phonetic features bringing independent information to the solution of the problem. To the best of the authors’ knowledge, we report the best results on the given task and data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bernardo B. Gatto|AUTHOR Bernardo B. Gatto]]^^1^^, [[Eulanda M. dos Santos|AUTHOR Eulanda M. dos Santos]]^^1^^, [[Juan G. Colonna|AUTHOR Juan G. Colonna]]^^1^^, [[Naoya Sogi|AUTHOR Naoya Sogi]]^^2^^, [[Lincon S. Souza|AUTHOR Lincon S. Souza]]^^2^^, [[Kazuhiro Fukui|AUTHOR Kazuhiro Fukui]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UFAM, Brazil; ^^2^^University of Tsukuba, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2887–2891&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Classifying bioacoustic signals is a fundamental task for ecological monitoring. However, this task includes several challenges, such as nonuniform signal length, environmental noise, and scarce training data. To tackle these challenges, we present a discriminative mechanism to classify bioacoustic signals, which does not require a large amount of training data and handles nonuniform signal length. The proposed method relies on transforming the input signals into subspaces generated by the singular spectrum analysis (SSA). Then, the difference between the subspaces is used as a discriminative space, providing discriminative features. This formulation allows a segmentation-free approach to represent and classify bioacoustic signals, as well as a highly compact descriptor inherited from the SSA. We validate the proposed method using challenging datasets containing a variety of bioacoustic signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Renuka Mannem|AUTHOR Renuka Mannem]]^^1^^, [[Hima Jyothi R.|AUTHOR Hima Jyothi R.]]^^2^^, [[Aravind Illa|AUTHOR Aravind Illa]]^^1^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^RGUKT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2892–2896&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, speech rate is estimated using the task-specific representations which are learned from the acoustic-articulatory data, in contrast to generic representations which may not be optimal for the speech rate estimation. 1-D convolutional filters are used to learn speech rate specific acoustic representations from the raw speech. A convolutional dense neural network (CDNN) is used to estimate the speech rate from the learned representations. In practice, articulatory data is not directly available; thus, we use Acoustic-to-Articulatory Inversion (AAI) to derive the articulatory representations from acoustics. However, these pseudo-articulatory representations are also generic and not optimized for any task. To learn the speech-rate specific pseudo-articulatory representations, we propose a joint training of BLSTM-based AAI and CDNN using a weighted loss function that considers the losses corresponding to speech rate estimation and articulatory prediction. The proposed model yields an improvement in speech rate estimation by ~18.5% in terms of pearson correlation coefficient (CC) compared to the baseline CDNN model with generic articulatory representations as inputs. To utilize complementary information from articulatory features, we further perform experiments by concatenating task-specific acoustic and pseudo-articulatory representations, which yield an improvement in CC by ~2.5% compared to the baseline CDNN model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abner Hernandez|AUTHOR Abner Hernandez]], [[Eun Jung Yeo|AUTHOR Eun Jung Yeo]], [[Sunhee Kim|AUTHOR Sunhee Kim]], [[Minhwa Chung|AUTHOR Minhwa Chung]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2897–2901&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Dysarthria refers to a range of speech disorders mainly affecting articulation. However, impairments are also seen in suprasegmental elements of speech such as prosody. In this study, we examine the effect of using rhythm metrics on detecting dysarthria, and for assessing severity level. Previous studies investigating prosodic irregularities in dysarthria tend to focus on pitch or voice quality measurements. Rhythm is another aspect of prosody which refers to the rhythmic division of speech units into relatively equal time. Speakers with dysarthria tend to have irregular rhythmic patterns that could be useful for detecting dysarthria. We compare the classification accuracy between solely using standard prosodic features against using both standard prosodic features and rhythm-based features, using random forest, support vector machine, and feed-forward neural network. Our best performing classifiers achieved a relative percentage increase of 7.5% and 15% in detection and severity assessment respectively for the QoLT Korean dataset, while the TORGO English dataset had an increase of 4.1% and 3.2%. Results indicate that including rhythmic information can increase accuracy performance regardless of the classifier. Furthermore, we show that rhythm metrics are useful in both Korean and English.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Ma|AUTHOR Yi Ma]], [[Xinzi Xu|AUTHOR Xinzi Xu]], [[Yongfu Li|AUTHOR Yongfu Li]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2902–2906&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Performing an automated adventitious lung sound detection is a challenging task since the sound is susceptible to noises (heartbeat, motion artifacts, and audio sound) and there is subtle discrimination among different categories. An adventitious lung sound classification model, LungRN+NL, is proposed in this work, which has demonstrated a drastic improvement compared to our previous work and the state-of-the-art models. This new model has incorporated the non-local block in the ResNet architecture. To address the imbalance problem and to improve the robustness of the model, we have also incorporated the mixup method to augment the training dataset. Our model has been implemented and compared with the state-of-the-art works using the official ICBHI 2017 challenge dataset and their evaluation method. As a result, LungRN+NL has achieved a performance score of 52.26%, which is improved by 2.1–12.7% compared to the state-of-the-art models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhayjeet Singh|AUTHOR Abhayjeet Singh]], [[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2907–2911&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While speaking at different rates, articulators (like tongue, lips) tend to move differently and the enunciations are also of different durations. In the past, affine transformation and DNN have been used to transform articulatory movements from neutral to fast(N2F) and neutral to slow(N2S) speaking rates [1]. In this work, we improve over the existing transformation techniques by modeling rate specific durations and their transformation using AstNet, an encoder-decoder framework with attention. In the current work, we propose an encoder-decoder architecture using LSTMs which generates smoother predicted articulatory trajectories. For modeling duration variations across speaking rates, we deploy attention network, which eliminates the need to align trajectories in different rates using DTW. We perform a phoneme specific duration analysis to examine how well duration is transformed using the proposed AstNet. As the range of articulatory motions is correlated with speaking rate, we also analyze amplitude of the transformed articulatory movements at different rates compared to their original counterparts, to examine how well the proposed AstNet predicts the extent of articulatory movements in N2F and N2S.We observe that AstNet could model both duration and extent of articulatory movements better than the existing transformation techniques resulting in more accurate transformed articulatory trajectories.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zijiang Yang|AUTHOR Zijiang Yang]], [[Shuo Liu|AUTHOR Shuo Liu]], [[Meishu Song|AUTHOR Meishu Song]], [[Emilia Parada-Cabaleiro|AUTHOR Emilia Parada-Cabaleiro]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2912–2916&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Every year, respiratory diseases affect millions of people worldwide, becoming one of the main causes of death in nowadays society. Currently, the COVID-19 — known as a novel respiratory illness — has triggered a global health crisis, which has been identified as the greatest challenge of our time since the Second World War. COVID-19 and many other respiratory diseases present often common symptoms, which impairs their early diagnosis; thus, restricting their prevention and treatment. In this regard, in order to encourage a faster and more accurate detection of these kinds of diseases, the automatic identification of respiratory illness through the application of machine learning methods is a very promising area of research aimed to support clinicians. With this in mind, we apply attention-based Convolutional Neural Networks for the recognition of adventitious respiratory cycles on the International Conference on Biomedical Health Informatics 2017 challenge database. Experimental results indicate that the architecture of residual networks with attention mechanism achieves a significant improvement w. r. t. the baseline models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Raphael Lenain|AUTHOR Raphael Lenain]], [[Jack Weston|AUTHOR Jack Weston]], [[Abhishek Shivkumar|AUTHOR Abhishek Shivkumar]], [[Emil Fristed|AUTHOR Emil Fristed]]
</p><p class="cpabstractcardaffiliationlist">Novoic, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2917–2921&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce Surfboard, an open-source Python library for extracting audio features with application to the medical domain. Surfboard is written with the aim of addressing pain points of existing libraries and facilitating joint use with modern machine learning frameworks. The package can be accessed both programmatically in Python and via its command line interface, allowing it to be easily integrated within machine learning workflows. It builds on state-of-the-art audio analysis packages and offers multiprocessing support for processing large workloads. We review similar frameworks and describe Surfboard’s architecture, including the clinical motivation for its features. Using the mPower dataset, we illustrate Surfboard’s application to a Parkinson’s disease classification task, highlighting common pitfalls in existing research. The source code is opened up to the research community to facilitate future audio research in the clinical domain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Trung Hieu Nguyen|AUTHOR Trung Hieu Nguyen]], [[Hao Wang|AUTHOR Hao Wang]], [[Bin Ma|AUTHOR Bin Ma]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2927–2931&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent state-of-the-art neural text-to-speech (TTS) synthesis models have dramatically improved intelligibility and naturalness of generated speech from text. However, building a good bilingual or code-switched TTS for a particular voice is still a challenge. The main reason is that it is not easy to obtain a bilingual corpus from a speaker who achieves native-level fluency in both languages. In this paper, we explore the use of Mandarin speech recordings from a Mandarin speaker, and English speech recordings from another English speaker to build high-quality bilingual and code-switched TTS for both speakers. A Tacotron2-based cross-lingual voice conversion system is employed to generate the Mandarin speaker’s English speech and the English speaker’s Mandarin speech, which show good naturalness and speaker similarity. The obtained bilingual data are then augmented with code-switched utterances synthesized using a Transformer model. With these data, three neural TTS models — Tacotron2, Transformer and FastSpeech are applied for building bilingual and code-switched TTS. Subjective evaluation results show that all the three systems can produce (near-)native-level speech in both languages for each of the speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomáš Nekvinda|AUTHOR Tomáš Nekvinda]], [[Ondřej Dušek|AUTHOR Ondřej Dušek]]
</p><p class="cpabstractcardaffiliationlist">Charles University, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2972–2976&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce an approach to multilingual speech synthesis which uses the meta-learning concept of contextual parameter generation and produces natural-sounding multilingual speech using more languages and less training data than previous approaches. Our model is based on Tacotron 2 with a fully convolutional input text encoder whose weights are predicted by a separate parameter generator network. To boost voice cloning, the model uses an adversarial speaker classifier with a gradient reversal layer that removes speaker-specific information from the encoder.

We arranged two experiments to compare our model with baselines using various levels of cross-lingual parameter sharing, in order to evaluate: (1) stability and performance when training on low amounts of data, (2) pronunciation accuracy and voice quality of code-switching synthesis. For training, we used the CSS10 dataset and our new small dataset based on Common Voice recordings in five languages. Our model is shown to effectively share information across languages and according to a subjective evaluation test, it produces more natural and accurate code-switching speech than the baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhaoyu Liu|AUTHOR Zhaoyu Liu]], [[Brian Mak|AUTHOR Brian Mak]]
</p><p class="cpabstractcardaffiliationlist">HKUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2932–2936&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies in multi-lingual and multi-speaker text-to-speech synthesis proposed approaches that use proprietary corpora of performing artists and require fine-tuning to enroll new voices. To reduce these costs, we investigate a novel approach for generating high-quality speeches in multiple languages of speakers enrolled in their native language. In our proposed system, we introduce tone/stress embeddings which extend the language embedding to represent tone and stress information. By manipulating the tone/stress embedding input, our system can synthesize speeches in native accent or foreign accent. To support online enrollment of new speakers, we condition the Tacotron-based synthesizer on speaker embeddings derived from a pre-trained x-vector speaker encoder by transfer learning. We introduce a shared phoneme set to encourage more phoneme sharing compared with the IPA. Our MOS results demonstrate that the native speech in all languages is highly intelligible and natural. We also find L2-norm normalization and ZCA-whitening on x-vectors are helpful to improve the system stability and audio quality. We also find that the WaveNet performance is seemingly language-independent: the WaveNet model trained with any of the three supported languages in our system can be used to generate speeches in the other two languages very well.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]], [[Tao Wang|AUTHOR Tao Wang]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2937–2941&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most of current end-to-end speech synthesis assumes the input text is in a single language situation. However, code-switching in speech occurs frequently in routine life, in which speakers switch between languages in the same utterance. And building a large mixed-language speech database is difficult and uneconomical. In this paper, both windowing technique and style token modeling are designed for the code-switching end-to-end speech synthesis. To improve the consistency of speaking style in bilingual situation, compared with the conventional windowing techniques that used fixed constraints, the dynamic attention reweighting soft windowing mechanism is proposed to ensure the smooth transition of code-switching. To compensate the shortage of mixed-language training data, the language dependent style token is designed for the cross-language multi-speaker acoustic modeling, where both the Mandarin and English monolingual data are the extended training data set. The attention gating is proposed to adjust style token dynamically based on the language and the attended context information. Experimental results show that proposed methods lead to an improvement on intelligibility, naturalness and similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marlene Staib|AUTHOR Marlene Staib]]^^1^^, [[Tian Huey Teh|AUTHOR Tian Huey Teh]]^^1^^, [[Alexandra Torresquintero|AUTHOR Alexandra Torresquintero]]^^1^^, [[Devang S. Ram Mohan|AUTHOR Devang S. Ram Mohan]]^^1^^, [[Lorenzo Foglianti|AUTHOR Lorenzo Foglianti]]^^1^^, [[Raphael Lenain|AUTHOR Raphael Lenain]]^^2^^, [[Jiameng Gao|AUTHOR Jiameng Gao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Papercup Technologies, UK; ^^2^^Novoic, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2942–2946&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching — the intra-utterance use of multiple languages — is prevalent across the world. Within text-to-speech (TTS), multilingual models have been found to enable code-switching [1–3]. By modifying the linguistic input to sequence-to-sequence TTS, we show that code-switching is possible for languages unseen during training, even within monolingual models. We use a small set of phonological features derived from the International Phonetic Alphabet (IPA), such as vowel height and frontness, consonant place and manner. This allows the model topology to stay unchanged for different languages, and enables new, previously unseen feature combinations to be interpreted by the model. We show that this allows us to generate intelligible, code-switched speech in a new language at test time, including the approximation of sounds never seen in training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Detai Xin|AUTHOR Detai Xin]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2947–2951&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a method for improving the performance of cross-lingual text-to-speech synthesis. Previous works are able to model speaker individuality in speaker space via speaker encoder but suffer from performance decreasing when synthesizing cross-lingual speech. This is because the speaker space formed by all speaker embeddings is completely language-dependent. In order to construct a language-independent speaker space, we regard cross-lingual speech synthesis as a domain adaptation problem and propose a training method to let the speaker encoder adapt speaker embedding of different languages into the same space. Furthermore, to improve speaker individuality and construct a human-interpretable speaker space, we propose a regression method to construct perceptually correlated speaker space. Experimental result demonstrates that our method could not only improve the performance of both cross-lingual and intra-lingual speech but also find perceptually similar speakers beyond languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruolan Liu|AUTHOR Ruolan Liu]], [[Xue Wen|AUTHOR Xue Wen]], [[Chunhui Lu|AUTHOR Chunhui Lu]], [[Xiao Chen|AUTHOR Xiao Chen]]
</p><p class="cpabstractcardaffiliationlist">Samsung, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2952–2956&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a system for low-resource multi-speaker cross-lingual text-to-speech synthesis. In particular, we train with monolingual English and Mandarin speakers and synthesize every speaker in both languages. The Mandarin training data is limited to 15 minutes of speech by a female Mandarin speaker. We identify accent carry-over and mispronunciation in low-resource language as two major challenges in this scenario, and address these issues by tone preservation mechanisms and data augmentation, respectively. We apply these techniques to a recent strong multi-lingual baseline and achieve higher ratings in intelligibility and target accent, but slightly lower ratings in cross-lingual speaker similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shubham Bansal|AUTHOR Shubham Bansal]], [[Arijit Mukherjee|AUTHOR Arijit Mukherjee]], [[Sandeepkumar Satpal|AUTHOR Sandeepkumar Satpal]], [[Rupeshkumar Mehta|AUTHOR Rupeshkumar Mehta]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2957–2961&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Regional entities often occur in a code-mixed text in the non-native roman script and synthesizing them with the correct pronunciation and accent is a challenging problem. English grapheme-to-phoneme (G2P) rules fail for such entities because of the orthographical mistakes and phonological differences between the English and regional languages. The traditional approach for this problem involves language identification, followed by the transliteration of the regional entities to their native language and then passing them through a native G2P. In this work, we simplify this module based architecture by learning an end-to-end mixlingual G2P in a multi-task type setting. Also, rather than mapping the output phone sequences from our mixlingual G2P to the English phoneset or using the “shared” phoneset, we use the polyglot data and “separated” phoneset to train a mixlingual synthesizer to improvise the synthesized voice accent for regional entities. We have used Hindi-English as the code-mix scenario and we show absolute incremental gains of up to 28% in pronunciation accuracy and a 0.9 gain in “overall impression” mean-opinion-score (MOS) over using a standard English monolingual text-to-speech (TTS).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anusha Prakash|AUTHOR Anusha Prakash]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]
</p><p class="cpabstractcardaffiliationlist">IIT Madras, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2962–2966&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Building text-to-speech (TTS) synthesisers for Indian languages is a difficult task owing to a large number of active languages. Indian languages can be classified into a finite set of families, prominent among them, Indo-Aryan and Dravidian. The proposed work exploits this property to build a generic TTS system using multiple languages from the same family in an end-to-end framework. Generic systems are quite robust as they are capable of capturing a variety of phonotactics across languages. These systems are then adapted to a new language in the same family using small amounts of adaptation data. Experiments indicate that good quality TTS systems can be built using only 7 minutes of adaptation data. An average degradation mean opinion score of 3.98 is obtained for the adapted TTSes.

Extensive analysis of systematic interactions between languages in the generic TTSes is carried out. x-vectors are included as speaker embeddings to synthesise text in a particular speaker’s voice. An interesting observation is that the prosody of the target speaker’s voice is preserved. These results are quite promising as they indicate the capability of generic TTSes to handle speaker and language switching seamlessly, along with the ease of adaptation to a new language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marcel de Korte|AUTHOR Marcel de Korte]], [[Jaebok Kim|AUTHOR Jaebok Kim]], [[Esther Klabbers|AUTHOR Esther Klabbers]]
</p><p class="cpabstractcardaffiliationlist">ReadSpeaker, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2967–2971&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in neural TTS have led to models that can produce high-quality synthetic speech. However, these models typically require large amounts of training data, which can make it costly to produce a new voice with the desired quality. Although multi-speaker modeling can reduce the data requirements necessary for a new voice, this approach is usually not viable for many low-resource languages for which abundant multi-speaker data is not available. In this paper, we therefore investigated to what extent multilingual multi-speaker modeling can be an alternative to monolingual multi-speaker modeling, and explored how data from foreign languages may best be combined with low-resource language data. We found that multilingual modeling can increase the naturalness of low-resource language speech, showed that multilingual models can produce speech with a naturalness comparable to monolingual multi-speaker models, and saw that the target language naturalness was affected by the strategy used to add foreign language data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joon Son Chung|AUTHOR Joon Son Chung]]^^1^^, [[Jaesung Huh|AUTHOR Jaesung Huh]]^^2^^, [[Seongkyu Mun|AUTHOR Seongkyu Mun]]^^2^^, [[Minjae Lee|AUTHOR Minjae Lee]]^^2^^, [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]]^^2^^, [[Soyeon Choe|AUTHOR Soyeon Choe]]^^2^^, [[Chiheon Ham|AUTHOR Chiheon Ham]]^^2^^, [[Sunghwan Jung|AUTHOR Sunghwan Jung]]^^2^^, [[Bong-Jin Lee|AUTHOR Bong-Jin Lee]]^^2^^, [[Icksang Han|AUTHOR Icksang Han]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2977–2981&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The objective of this paper is ‘open-set’ speaker recognition of unseen speakers, where ideal embeddings should be able to condense information into a compact utterance-level representation that has small intra-speaker and large inter-speaker distance.

A popular belief in speaker recognition is that networks trained with classification objectives outperform metric learning methods. In this paper, we present an extensive evaluation of the most popular loss functions for speaker recognition on the VoxCeleb dataset. We demonstrate that the vanilla triplet loss shows competitive performance compared to classification-based losses, and that networks trained with our proposed metric learning objective outperform state-of-the-art methods.</p></div>
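For readers unfamiliar with the metric-learning losses being compared, below is a minimal sketch of the vanilla triplet loss on utterance-level embeddings; the paper's proposed metric learning objective itself is not reproduced here, and the margin value is a placeholder.

```python
# Minimal sketch: vanilla triplet loss on L2-normalised speaker embeddings.
import torch
import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin: float = 0.3):
    """anchor/positive/negative: (batch, dim) embeddings."""
    anchor = F.normalize(anchor, dim=-1)
    positive = F.normalize(positive, dim=-1)
    negative = F.normalize(negative, dim=-1)
    d_ap = (anchor - positive).pow(2).sum(dim=-1)   # distance to same-speaker embedding
    d_an = (anchor - negative).pow(2).sum(dim=-1)   # distance to other-speaker embedding
    return F.relu(d_ap - d_an + margin).mean()

# usage with random embeddings
a, p, n = torch.randn(8, 192), torch.randn(8, 192), torch.randn(8, 192)
print(triplet_loss(a, p, n).item())
```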
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seong Min Kye|AUTHOR Seong Min Kye]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hae Beom Lee|AUTHOR Hae Beom Lee]], [[Sung Ju Hwang|AUTHOR Sung Ju Hwang]], [[Hoirin Kim|AUTHOR Hoirin Kim]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2982–2986&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In practical settings, a speaker recognition system needs to identify a speaker given a short utterance, while the enrollment utterance may be relatively long. However, existing speaker recognition models perform poorly with such short utterances. To solve this problem, we introduce a meta-learning framework for imbalance length pairs. Specifically, we use a Prototypical Networks and train it with a support set of long utterances and a query set of short utterances of varying lengths. Further, since optimizing only for the classes in the given episode may be insufficient for learning discriminative embeddings for unseen classes, we additionally enforce the model to classify both the support and the query set against the entire set of classes in the training set. By combining these two learning schemes, our model outperforms existing state-of-the-art speaker verification models learned with a standard supervised learning framework on short utterance (1-2 seconds) on the VoxCeleb datasets. We also validate our proposed model for unseen speaker identification, on which it also achieves significant performance gains over the existing approaches. The codes are available at https://github.com/seongmin-kye/meta-SR</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kai Li|AUTHOR Kai Li]]^^1^^, [[Masato Akagi|AUTHOR Masato Akagi]]^^1^^, [[Yibo Wu|AUTHOR Yibo Wu]]^^2^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JAIST, Japan; ^^2^^Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2987–2991&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker embeddings extracted from neural network (NN) achieve excellent performance on general speaker verification (SV) missions. Most current SV systems use only speaker labels. Therefore, the interaction between different types of domain information decrease the prediction accuracy of SV. To overcome this weakness and improve SV performance, four effective SV systems were proposed by using gender, nationality, and emotion information to add more constraints in the NN training stage. More specifically, multitask learning-based systems which including multitask gender (MTG), multitask nationality (MTN) and multitask gender and nationality (MTGN) were used to enhance gender and nationality information learning. Domain adversarial training-based system which including emotion domain adversarial training (EDAT) was used to suppress different emotions information learning. Experimental results indicate that encouraging gender and nationality information and suppressing emotion information learning improve the performance of SV. In the end, our proposed systems achieved 16.4 and 22.9% relative improvements in the equal error rate for MTL- and DAT-based systems, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanpei Shi|AUTHOR Yanpei Shi]], [[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2992–2996&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Identifying multiple speakers without knowing where a speaker’s voice is in a recording is a challenging task. In this paper, a hierarchical attention network is proposed to solve a weakly labelled speaker identification problem. The use of a hierarchical structure, consisting of a frame-level encoder and a segment-level encoder, aims to learn speaker related information locally and globally. Speech streams are segmented into fragments. The frame-level encoder with attention learns features and highlights the target related frames locally, and output a fragment based embedding. The segment-level encoder works with a second attention layer to emphasize the fragments probably related to target speakers. The global information is finally collected from segment-level module to predict speakers via a classifier. To evaluate the effectiveness of the proposed approach, artificial datasets based on Switchboard Cellular part1 (SWBC) and Voxceleb1 are constructed in two conditions, where speakers’ voices are overlapped and not overlapped. Comparing to two baselines the obtained results show that the proposed approach can achieve better performances. Moreover, further experiments are conducted to evaluate the impact of utterance segmentation. The results show that a reasonable segmentation can slightly improve identification performances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ana Montalvo|AUTHOR Ana Montalvo]]^^1^^, [[Jose R. Calvo|AUTHOR Jose R. Calvo]]^^1^^, [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CENATAV, Cuba; ^^2^^LIA (EA 4128), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2997–3001&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech is a complex signal conveying numerous information about the message but also various characteristics of the speaker: its sex, age, accent, language. Understanding the use of these features by machine learning (ML) systems has two main advantages. First, it could help prevent bias and discrimination in ML speech applications. Second, joint modeling of this information using multitasking learning approaches (MTL) has great potential for improvement. We explore in this paper the use of MTL in non-linguistic tasks. We compare single- and multi-task models applied to three tasks: (spanish) nativeness, speaker and sex. The effect of training data set size in the performance of both single- and multi-task models is investigated as well as the specific contribution of nativeness and sex information to speaker recognition. Experimental results show that multi-task (MTL) models outperform single task models. We have also found that MTL is beneficial for small training data sets and for low-level acoustic features rather than for pretrained features such as bottleneck ones. Our results indicate also that more attention should be addressed to the information used by ML approaches in order to prevent biases or discrimination.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Umair Khan|AUTHOR Umair Khan]], [[Javier Hernando|AUTHOR Javier Hernando]]
</p><p class="cpabstractcardaffiliationlist">Universitat Politècnica de Catalunya, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3002–3006&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker labeled background data is an essential requirement for most state-of-the-art approaches in speaker recognition, e.g., x-vectors and i-vector/PLDA. However, in reality it is difficult to access large amount of labeled data. In this work, we propose siamese networks for speaker verification without using speaker labels. We propose two different siamese networks having two and three branches, respectively, where each branch is a CNN encoder. Since the goal is to avoid speaker labels, we propose to generate the training pairs in an unsupervised manner. The client samples are selected within one database according to highest cosine scores with the anchor in i-vector space. The impostor samples are selected in the same way but from another database. Our double-branch siamese performs binary classification using cross entropy loss during training. In testing phase, we obtain speaker verification scores directly from its output layer. Whereas, our triple-branch siamese is trained to learn speaker embeddings using triplet loss. During testing, we extract speaker embeddings from its output layer, which are scored in the experiments using cosine scoring. The evaluation is performed on VoxCeleb-1 database, which show that using the proposed unsupervised systems, solely or in fusion, the results get closer to supervised baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ying Liu|AUTHOR Ying Liu]]^^1^^, [[Yan Song|AUTHOR Yan Song]]^^1^^, [[Yiheng Jiang|AUTHOR Yiheng Jiang]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^1^^, [[Lin Liu|AUTHOR Lin Liu]]^^2^^, [[Li-Rong Dai|AUTHOR Li-Rong Dai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^iFLYTEK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3007–3011&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep embedding learning based speaker verification methods have attracted significant recent research interest due to their superior performance. Existing methods mainly focus on designing frame-level feature extraction structures, utterance-level aggregation methods and loss functions to learn discriminative speaker embeddings. The scores of verification trials are then computed using cosine distance or Probabilistic Linear Discriminative Analysis (PLDA) classifiers. This paper proposes an effective speaker recognition method which is based on joint identification and verification supervisions, inspired by multi-task learning frameworks. Specifically, a deep architecture with convolutional feature extractor, attentive pooling and two classifier branches is presented. The first, an identification branch, is trained with additive margin softmax loss (AM-Softmax) to classify the speaker identities. The second, a verification branch, trains a discriminator with binary cross entropy loss (BCE) to optimize a new triplet-based mutual information. To balance the two losses during different training stages, a ramp-up/ramp-down weighting scheme is employed. Furthermore, an attentive bilinear pooling method is proposed to improve the effectiveness of embeddings. Extensive experiments have been conducted on VoxCeleb1 to evaluate the proposed method, demonstrating results that relatively reduce the equal error rate (EER) by 22% compared to the baseline system using identification supervision only.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naijun Zheng|AUTHOR Naijun Zheng]]^^1^^, [[Xixin Wu|AUTHOR Xixin Wu]]^^1^^, [[Jinghua Zhong|AUTHOR Jinghua Zhong]]^^2^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^SpeechX, China; ^^3^^CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3012–3016&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Linear discriminant analysis (LDA) is an effective and widely used discriminative technique for speaker verification. However, it only utilizes the information on global structure to perform classification. Some variants of LDA, such as local pairwise LDA (LPLDA), are proposed to preserve more information on the local structure in the linear projection matrix. However, considering that the local structure may vary a lot in different regions, summing up related components to construct a single projection matrix may not be sufficient. In this paper, we present a speaker-aware strategy focusing on preserving distinct information on local structure in a set of linear discriminant projection matrices, and allocating them to different local regions for dimension reduction and classification. Experiments on NIST SRE2010 and NIST SRE2016 show that the speaker-aware strategy can boost the performance of both LDA and LPLDA backends in i-vector systems and x-vector systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3017–3021&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker verification systems usually suffer from large performance degradation when applied to a new dataset from a different domain. In this work, we will study the domain adaption strategy between datasets with different languages using domain adversarial training. We introduce a partially shared network based domain adversarial training architecture to learn an asymmetric mapping for source and target domain embedding extractor. This architecture can help the embedding extractor learn domain invariant feature without sacrificing the ability on speaker discrimination. When doing the evaluation on cross-lingual domain adaption, the source domain data is in English from NIST SRE04-10 and Switchboard, and the target domain data is in Cantonese and Tagalog from NIST SRE16. Our results show that the usual adversarial training mode will indeed harm the speaker discrimination when the source and target domain embedding extractors are fully shared, and in contrast the newly proposed architecture solves this problem and achieves ~25.0% relative average Equal Error Rate (EER) improvement on SRE16 Cantonese and Tagalog evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Menglong Xu|AUTHOR Menglong Xu]], [[Xiao-Lei Zhang|AUTHOR Xiao-Lei Zhang]]
</p><p class="cpabstractcardaffiliationlist">Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2547–2551&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One difficult problem of keyword spotting is how to miniaturize its memory footprint while maintain a high precision. Although convolutional neural networks have shown to be effective to the small-footprint keyword spotting problem, they still need hundreds of thousands of parameters to achieve good performance. In this paper, we propose an efficient model based on depthwise separable convolution layers and squeeze-and-excitation blocks. Specifically, we replace the standard convolution by the depthwise separable convolution, which reduces the number of the parameters of the standard convolution without significant performance degradation. We further improve the performance of the depthwise separable convolution by reweighting the output feature maps of the first convolution layer with a so-called squeeze-and-excitation block. We compared the proposed method with five representative models on two experimental settings of the Google Speech Commands dataset. Experimental results show that the proposed method achieves the state-of-the-art performance. For example, it achieves a classification error rate of 3.29% with a number of parameters of 72K in the first experiment, which significantly outperforms the comparison methods given a similar model size. It achieves an error rate of 3.97% with a number of parameters of 10K, which is also slightly better than the state-of-the-art comparison method given a similar model size.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuya Higuchi|AUTHOR Takuya Higuchi]], [[Mohammad Ghasemzadeh|AUTHOR Mohammad Ghasemzadeh]], [[Kisun You|AUTHOR Kisun You]], [[Chandra Dhir|AUTHOR Chandra Dhir]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2592–2596&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a stacked 1D convolutional neural network (S1DCNN) for end-to-end small footprint voice trigger detection in a streaming scenario. Voice trigger detection is an important speech application, with which users can activate their devices by simply saying a keyword or phrase. Due to privacy and latency reasons, a voice trigger detection system should run on an always-on processor on device. Therefore, having small memory and compute cost is crucial for a voice trigger detection system. Recently, singular value decomposition filters (SVDFs) has been used for end-to-end voice trigger detection. The SVDFs approximate a fully-connected layer with a low rank approximation, which reduces the number of model parameters. In this work, we propose S1DCNN as an alternative approach for end-to-end small-footprint voice trigger detection. An S1DCNN layer consists of a 1D convolution layer followed by a depth-wise 1D convolution layer. We show that the SVDF can be expressed as a special case of the S1DCNN layer. Experimental results show that the S1DCNN achieve 19.0% relative false reject ratio (FRR) reduction with a similar model size and a similar time delay compared to the SVDF. By using longer time delays, the S1DCNN further improve the FRR up to 12.2% relative.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Théodore Bluche|AUTHOR Théodore Bluche]], [[Thibault Gisselbrecht|AUTHOR Thibault Gisselbrecht]]
</p><p class="cpabstractcardaffiliationlist">Sonos, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2552–2556&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a fully-neural approach to open-vocabulary keyword spotting, that allows the users to include a customizable voice interface to their device and that does not require task-specific data. We present a keyword detection neural network weighing less than 250KB, in which the topmost layer performing keyword detection is predicted by an auxiliary network, that may be run offline to generate a detector for any keyword. We show that the proposed model outperforms acoustic keyword spotting baselines by a large margin on two tasks of detecting keywords in utterances and three tasks of detecting isolated speech commands. We also propose a method to fine-tune the model when specific training data is available for some keywords, which yields a performance similar to a standard speech command neural network while keeping the ability of the model to be applied to new keywords.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Özgür Bora Gevrek|AUTHOR Özgür Bora Gevrek]], [[Jibin Wu|AUTHOR Jibin Wu]], [[Yuxiang Chen|AUTHOR Yuxiang Chen]], [[Xuanbo Meng|AUTHOR Xuanbo Meng]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2557–2561&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the use of deep convolutional spiking neural networks (SNN) for keyword spotting (KWS) and wakeword detection tasks. The brain-inspired SNN mimic the spike-based information processing of biological neural networks and they can operate on the emerging ultra-low power neuromorphic chips. Unlike conventional artificial neural networks (ANN), SNN process input information asynchronously in an event-driven manner. With temporally sparse input information, this event-driven processing substantially reduces the computational requirements compared to the synchronous computation performed in ANN-based KWS approaches. To explore the effectiveness and computational complexity of SNN on KWS and wakeword detection, we compare the performance and computational costs of spiking fully-connected and convolutional neural networks with ANN counterparts under clean and noisy testing conditions. The results obtained on the Speech Commands and Hey Snips corpora have shown the effectiveness of the convolutional SNN model compared to a conventional CNN with comparable performance on KWS and better performance on the wakeword detection task. With its competitive performance and reduced computational complexity, convolutional SNN models running on energy-efficient neuromorphic hardware offer a low-power and effective solution for mobile KWS applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haiwei Wu|AUTHOR Haiwei Wu]]^^1^^, [[Yan Jia|AUTHOR Yan Jia]]^^1^^, [[Yuanfei Nie|AUTHOR Yuanfei Nie]]^^2^^, [[Ming Li|AUTHOR Ming Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Duke Kunshan University, China; ^^2^^Montage Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2562–2566&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we focus on the task of small-footprint keyword spotting under the far-field scenario. Far-field environments are commonly encountered in real-life speech applications, causing severe degradation of performance due to room reverberation and various kinds of noises. Our baseline system is built on the convolutional neural network trained with pooled data of both far-field and close-talking speech. To cope with the distortions, we develop three domain aware training systems, including the domain embedding system, the deep CORAL system, and the multi-task learning system. These methods incorporate domain knowledge into network training and improve the performance of the keyword classifier on far-field conditions. Experimental results show that our proposed methods manage to maintain the performance on the close-talking speech and achieve significant improvement on the far-field test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kun Zhang|AUTHOR Kun Zhang]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Daode Yuan|AUTHOR Daode Yuan]]^^2^^, [[Jian Luan|AUTHOR Jian Luan]]^^2^^, [[Jia Jia|AUTHOR Jia Jia]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^, [[Binheng Song|AUTHOR Binheng Song]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2567–2571&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The training process of end-to-end keyword spotting (KWS) suffers from critical data imbalance problem that positive samples are far less than negative samples where different negative samples are not of equal importance. During decoding, false alarms are mainly caused by a small number of //important negative samples// having pronunciation similar to the keyword; however, the training loss is dominated by the majority of negative samples whose pronunciation is not related to the keyword, called //unimportant negative samples//. This inconsistency greatly degrades the performance of KWS and existing methods like focal loss don’t discriminate between the two kinds of negative samples. To deal with the problem, we propose a novel re-weighted interval loss to re-weight sample loss considering the performance of the classifier over local interval of negative utterance, which automatically down-weights the losses of unimportant negative samples and focuses training on important negative samples that are prone to produce false alarms during decoding. Evaluations on Hey Snips dataset demonstrate that our approach has yielded a superior performance over focal loss baseline with 34% (@0.5 false alarm per hour) relative reduction of false reject rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peng Zhang|AUTHOR Peng Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Inner Mongolia University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2572–2576&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting (KWS) is a very important technique for human–machine interaction to detect a trigger phrase and voice commands. In practice, a popular demand for KWS is to conveniently define the keywords by consumers or device vendors. In this paper, we propose a novel template matching approach for KWS based on end-to-end deep learning method, which utilizes an attention mechanism to match the input voice to the keyword templates in high-level feature space. The proposed approach only requires very limited voice samples (at least only one sample) to register a new keyword without any retraining. We conduct experiments on the publicly available Google speech commands dataset. The experimental results demonstrate that our method outperforms baseline methods while allowing for a flexible configuration.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chen Yang|AUTHOR Chen Yang]], [[Xue Wen|AUTHOR Xue Wen]], [[Liming Song|AUTHOR Liming Song]]
</p><p class="cpabstractcardaffiliationlist">Samsung, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2577–2581&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a robust small-footprint keyword spotting system for resource-constrained devices. Small footprint is achieved by the use of depthwise-separable convolutions in a ResNet framework. Noise robustness is achieved with a multi-scale ensemble of classifiers: each classifier is specialized for a different view of the input, while the whole ensemble remains compact in size by heavy parameter sharing. Extensive experiments on public Google Command dataset demonstrate the effectiveness of our proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yangbin Chen|AUTHOR Yangbin Chen]]^^1^^, [[Tom Ko|AUTHOR Tom Ko]]^^2^^, [[Lifeng Shang|AUTHOR Lifeng Shang]]^^3^^, [[Xiao Chen|AUTHOR Xiao Chen]]^^3^^, [[Xin Jiang|AUTHOR Xin Jiang]]^^3^^, [[Qing Li|AUTHOR Qing Li]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CityU, China; ^^2^^SUSTech, China; ^^3^^Huawei Technologies, China; ^^4^^PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2582–2586&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we investigate the feasibility of applying few-shot learning algorithms to a speech task. We formulate a user-defined scenario of spoken term classification as a few-shot learning problem. In most few-shot learning studies, it is assumed that all the N classes are new in a N-way problem. We suggest that this assumption can be relaxed and define a N+M-way problem where N and M are the number of new classes and fixed classes respectively. We propose a modification to the Model-Agnostic Meta-Learning (MAML) algorithm to solve the problem. Experiments on the Google Speech Commands dataset show that our approach¹ outperforms the conventional supervised learning approach and the original MAML.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zeyu Zhao|AUTHOR Zeyu Zhao]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2587–2591&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword search (KWS) means searching for the keywords given by the user from continuous speech. Conventional KWS systems based on automatic speech recognition (ASR) decode input speech by ASR before searching for keywords. With deep neural network (DNN) becoming increasingly popular, some end-to-end (E2E) KWS emerged. The main advantage of E2E KWS is to avoid speech recognition. Since E2E KWS systems are at the very beginning, the performance is currently not as good as traditional methods, so there is still loads of work to do. To this end, we propose an E2E KWS model consists of four parts, including speech encoder-decoder, query encoder-decoder, attention mechanism and energy scorer. Different from the baseline system using auto-encoder to extract embeddings, the proposed model extracts embeddings that contain character sequence information by encode-decoder. Attention mechanism and a novel energy scorer are also introduced in the model, where the former can locate the keywords, and the latter can make the final decision. We train the models on low resource condition with only about 10-hour training data in various languages. The experiment results show that the proposed model outperforms the baseline system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Luo|AUTHOR Yi Luo]], [[Nima Mesgarani|AUTHOR Nima Mesgarani]]
</p><p class="cpabstractcardaffiliationlist">Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2622–2626&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many recent source separation systems are designed to separate a fixed number of sources out of a mixture. In the cases where the source activation patterns are unknown, such systems have to either adjust the number of outputs or to identify invalid outputs from the valid ones. Iterative separation methods have gain much attention in the community as they can flexibly decide the number of outputs, however (1) they typically rely on long-term information to determine the stopping time for the iterations, which makes them hard to operate in a causal setting; (2) they lack a “fault tolerance” mechanism when the estimated number of sources is different from the actual number. In this paper, we propose a simple training method, the auxiliary autoencoding permutation invariant training (A2PIT), to alleviate the two issues. A2PIT assumes a fixed number of outputs and uses auxiliary autoencoding loss to force the invalid outputs to be the copies of the input mixture, and detects invalid outputs in a fully unsupervised way during inference phase. Experiment results show that A2PIT is able to improve the separation performance across various numbers of speakers and effectively detect the number of speakers in a mixture.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingjing Chen|AUTHOR Jingjing Chen]], [[Qirong Mao|AUTHOR Qirong Mao]], [[Dong Liu|AUTHOR Dong Liu]]
</p><p class="cpabstractcardaffiliationlist">Jiangsu University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2627–2631&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Time-domain approaches for speech separation have achieved great success recently. However, the sources separated by these time-domain approaches usually contain some artifacts (broadband noises), especially when separating mixture with noise. In this paper, we incorporate synthesis way into the time-domain speech separation approaches to deal with above broadband noises in separated sources, which can be seamlessly used in the speech separation system by a ‘plug-and-play’ way. By directly learning an estimation for each source in encoded domain, synthesis way can reduce artifacts in estimated speeches and improve the speech separation performance. Extensive experiments on different state-of-the-art models reveal that the synthesis way acquires the ability to handle with noisy mixture and is more suitable for noisy speech separation. On a new benchmark noisy dataset, the synthesis way obtains 0.97 dB (10.1%) SDR relative improvement and respective gains on various metrics without extra computation cost.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jun Wang|AUTHOR Jun Wang]]
</p><p class="cpabstractcardaffiliationlist">Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2632–2636&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Can better representations be learnt from worse interfering scenarios? To verify this seeming paradox, we propose a novel framework that performed compositional learning in traditionally independent tasks of speech separation and speaker identification. In this framework, generic pre-training and compositional fine-tuning are proposed to mimic the bottom-up and top-down processes of a human’s cocktail party effect. Moreover, we investigate schemes to prohibit the model from ending up learning an easier identity-prediction task. Substantially discriminative and generalizable representations can be learnt in severely interfering conditions. Experiment results on downstream tasks show that our learnt representations have superior discriminative power than a standard speaker verification method. Meanwhile, RISE achieves higher SI-SNRi consistently in different inference modes over DPRNN, a state-of-the-art speech separation system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manuel Pariente|AUTHOR Manuel Pariente]]^^1^^, [[Samuele Cornell|AUTHOR Samuele Cornell]]^^2^^, [[Joris Cosentino|AUTHOR Joris Cosentino]]^^1^^, [[Sunit Sivasankaran|AUTHOR Sunit Sivasankaran]]^^1^^, [[Efthymios Tzinis|AUTHOR Efthymios Tzinis]]^^3^^, [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]]^^4^^, [[Michel Olvera|AUTHOR Michel Olvera]]^^1^^, [[Fabian-Robert Stöter|AUTHOR Fabian-Robert Stöter]]^^5^^, [[Mathieu Hu|AUTHOR Mathieu Hu]]^^1^^, [[Juan M. Martín-Doñas|AUTHOR Juan M. Martín-Doñas]]^^6^^, [[David Ditter|AUTHOR David Ditter]]^^7^^, [[Ariel Frank|AUTHOR Ariel Frank]]^^8^^, [[Antoine Deleforge|AUTHOR Antoine Deleforge]]^^1^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Loria (UMR 7503), France; ^^2^^Università Politecnica delle Marche, Italy; ^^3^^University of Illinois at Urbana-Champaign, USA; ^^4^^Universität Paderborn, Germany; ^^5^^LIRMM (UMR 5506), France; ^^6^^Universidad de Granada, Spain; ^^7^^Universität Hamburg, Germany; ^^8^^Technion, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2637–2641&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes Asteroid, the PyTorch-based audio source separation toolkit for researchers. Inspired by the most successful neural source separation systems, it provides all neural building blocks required to build such a system. To improve reproducibility, Kaldi-style recipes on common audio source separation datasets are also provided. This paper describes the software architecture of Asteroid and its most important features. By showing experimental results obtained with Asteroid’s recipes, we show that our implementations are at least on par with most results reported in reference papers. The toolkit is publicly available at https://github.com/mpariente/asteroid.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingjing Chen|AUTHOR Jingjing Chen]], [[Qirong Mao|AUTHOR Qirong Mao]], [[Dong Liu|AUTHOR Dong Liu]]
</p><p class="cpabstractcardaffiliationlist">Jiangsu University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2642–2646&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The dominant speech separation models are based on complex recurrent or convolution neural network that model speech sequences indirectly conditioning on context, such as passing information through many intermediate states in recurrent neural network, leading to suboptimal separation performance. In this paper, we propose a dual-path transformer network (DPTNet) for end-to-end speech separation, which introduces direct context-awareness in the modeling for speech sequences. By introduces a improved transformer, elements in speech sequences can interact directly, which enables DPTNet can model for the speech sequences with direct context-awareness. The improved transformer in our approach learns the order information of the speech sequences without positional encodings by incorporating a recurrent neural network into the original transformer. In addition, the structure of dual paths makes our model efficient for extremely long speech sequence modeling. Extensive experiments on benchmark datasets show that our approach outperforms the current state-of-the-arts (20.6 dB SDR on the public WSj0-2mix data corpus).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chengyun Deng|AUTHOR Chengyun Deng]], [[Yi Zhang|AUTHOR Yi Zhang]], [[Shiqian Ma|AUTHOR Shiqian Ma]], [[Yongtao Sha|AUTHOR Yongtao Sha]], [[Hui Song|AUTHOR Hui Song]], [[Xiangang Li|AUTHOR Xiangang Li]]
</p><p class="cpabstractcardaffiliationlist">DiDi Chuxing, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2647–2651&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conv-TasNet has showed competitive performance on single-channel speech source separation. In this paper, we investigate to further improve separation performance by optimizing the training mechanism with the same network structure. Motivated by the successful applications of generative adversarial networks (GANs) on speech enhancement tasks, we propose a novel Separative Adversarial Network called Conv-TasSAN, in which the separator is realized by using Conv-TasNet architecture. The discriminator is involved to optimize the separator with respect to specific speech objective metric. It makes the separator network capture the distribution information of speech sources more accurately, and also prevents over-smoothing problems. Experiments on WSJ0-2mix dataset confirm the superior performance of the proposed method over Conv-TasNet in terms of SI-SNR and PESQ improvement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]^^1^^, [[Thilo von Neumann|AUTHOR Thilo von Neumann]]^^2^^, [[Marc Delcroix|AUTHOR Marc Delcroix]]^^1^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^1^^, [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2652–2656&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, source separation performance was greatly improved by time-domain audio source separation based on the dual-path recurrent neural network (DPRNN). DPRNN is a simple but effective model for long sequential data. While DPRNN is quite efficient at modeling sequential data of the length of an utterance, i.e., about 5 to 10 seconds, it is harder to apply to longer sequences such as whole conversations consisting of multiple utterances, simply because the number of time steps consumed by its internal inter-chunk RNN module becomes extremely large in such cases. To mitigate this problem, this paper proposes a multi-path RNN (MPRNN), a generalized version of DPRNN, that models the input data in a hierarchical manner. In the MPRNN framework, the input data is represented at several (≥3) time resolutions, each of which is modeled by a specific RNN sub-module. For example, the RNN sub-module that deals with the finest resolution may model temporal relationships only within a phoneme, while the RNN sub-module handling the coarsest resolution may capture only relationships between utterances, such as speaker information. We perform experiments using simulated dialogue-like mixtures and show that MPRNN has greater model capacity and outperforms the current state-of-the-art DPRNN framework, especially in online processing scenarios.</p></div>
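The hierarchical, multi-resolution view of a long recording that MPRNN builds on can be illustrated with a small numpy sketch; the chunk sizes and the use of chunk means as coarse summaries are illustrative assumptions, not the paper's design.

```
# A minimal sketch (assumed shapes, not the paper's code) of representing one long
# feature sequence at several time resolutions: the finest path sees individual
# frames within a chunk, coarser paths see summaries of whole chunks.
import numpy as np

def multi_resolution_views(x, chunk_sizes=(10, 100, 1000)):
    """x: (time, feat). Returns one view per resolution; coarser views are chunk means."""
    views = []
    for size in chunk_sizes:
        n_chunks = int(np.ceil(len(x) / size))
        padded = np.pad(x, ((0, n_chunks * size - len(x)), (0, 0)))
        chunks = padded.reshape(n_chunks, size, x.shape[1])
        views.append(chunks.mean(axis=1))      # each RNN sub-module would model one view
    return views

x = np.random.randn(4500, 64)                  # e.g. a multi-utterance recording
for v in multi_resolution_views(x):
    print(v.shape)                             # (450, 64), (45, 64), (5, 64)
```
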
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vivek Narayanaswamy|AUTHOR Vivek Narayanaswamy]]^^1^^, [[Jayaraman J. Thiagarajan|AUTHOR Jayaraman J. Thiagarajan]]^^2^^, [[Rushil Anirudh|AUTHOR Rushil Anirudh]]^^2^^, [[Andreas Spanias|AUTHOR Andreas Spanias]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Arizona State University, USA; ^^2^^LLNL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2657–2661&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art under-determined audio source separation systems rely on supervised end-to-end training of carefully tailored neural network architectures operating either in the time or the spectral domain. However, these methods are severely challenged in that they require access to expensive source-level labeled data and are specific to a given set of sources and mixing process, demanding complete re-training when those assumptions change. This strongly emphasizes the need for unsupervised methods that can leverage the recent advances in data-driven modeling and compensate for the lack of labeled data through meaningful priors. To this end, we propose a novel approach for audio source separation based on generative priors trained on individual sources. Through the use of projected gradient descent optimization, our approach simultaneously searches the source-specific latent spaces to effectively recover the constituent sources. Though the generative priors can be defined in the time domain directly, e.g. WaveGAN, we find that using spectral-domain loss functions for our optimization leads to good-quality source estimates. Our empirical studies on standard spoken digit and instrument datasets clearly demonstrate the effectiveness of our approach over classical as well as state-of-the-art unsupervised baselines.</p></div>
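A toy version of the latent-space search described above might look as follows; the linear "generators", the clamping used as a stand-in for projection, and the magnitude-spectrum loss are all illustrative assumptions rather than the authors' setup.

```
# Toy sketch of separating a mixture by searching source-specific latent spaces with
# gradient descent under a spectral-domain loss. The frozen linear "generators" are
# stand-ins for pretrained priors such as WaveGAN; everything here is illustrative.
import torch

torch.manual_seed(0)
T, Z = 1024, 32
G1 = torch.nn.Linear(Z, T)          # pretend prior for source 1 (frozen)
G2 = torch.nn.Linear(Z, T)          # pretend prior for source 2 (frozen)
for p in list(G1.parameters()) + list(G2.parameters()):
    p.requires_grad_(False)

mixture = G1(torch.randn(Z)) + G2(torch.randn(Z))    # synthetic mixture

z1 = torch.zeros(Z, requires_grad=True)
z2 = torch.zeros(Z, requires_grad=True)
opt = torch.optim.Adam([z1, z2], lr=0.05)

def spec(x):                                          # magnitude spectrum as loss domain
    return torch.abs(torch.fft.rfft(x))

for step in range(200):
    opt.zero_grad()
    loss = torch.mean((spec(G1(z1) + G2(z2)) - spec(mixture)) ** 2)
    loss.backward()
    opt.step()
    with torch.no_grad():                             # crude "projection" onto a latent ball
        for z in (z1, z2):
            z.clamp_(-3.0, 3.0)

print(float(loss))                                    # G1(z1) and G2(z2) are the estimates
```
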
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuanhang Qiu|AUTHOR Yuanhang Qiu]], [[Ruili Wang|AUTHOR Ruili Wang]]
</p><p class="cpabstractcardaffiliationlist">Massey University, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2662–2666&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a novel adversarial latent representation learning (ALRL) method for speech enhancement. Based on adversarial feature learning, ALRL employs an extra encoder to learn an inverse mapping from the generated data distribution to the latent space. The encoder builds an inner connection with the generator and provides relevant latent information for adversarial feature modelling. A new loss function is proposed to implement the encoder mapping simultaneously. In addition, multi-head self-attention is applied to the encoder to learn long-range dependencies and thereby more effective adversarial representations. The experimental results demonstrate that ALRL outperforms current GAN-based speech enhancement methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing Shi|AUTHOR Jing Shi]]^^1^^, [[Jiaming Xu|AUTHOR Jiaming Xu]]^^1^^, [[Yusuke Fujita|AUTHOR Yusuke Fujita]]^^2^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^3^^, [[Bo Xu|AUTHOR Bo Xu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Hitachi, Japan; ^^3^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2707–2711&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech separation has been extensively explored to tackle the cocktail party problem. However, these studies are still far from having sufficient generalization capability for real scenarios. In this work, we propose a general strategy named Speaker-Conditional Chain Model to process complex speech recordings. In the proposed method, our model first infers the identities of a variable number of speakers from the observation based on a sequence-to-sequence model. Then, it takes the information of the inferred speakers as conditions to extract their speech sources. With the speaker information predicted from the whole observation, our model helps to solve the problems of conventional speech separation and speaker extraction for multi-round long recordings. Experiments on standard fully-overlapped speech separation benchmarks show results comparable with prior studies, while our proposed model achieves better adaptability for multi-round long recordings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Xiang|AUTHOR Yang Xiang]]^^1^^, [[Liming Shi|AUTHOR Liming Shi]]^^1^^, [[Jesper Lisby Højvang|AUTHOR Jesper Lisby Højvang]]^^2^^, [[Morten Højfeldt Rasmussen|AUTHOR Morten Højfeldt Rasmussen]]^^2^^, [[Mads Græsbøll Christensen|AUTHOR Mads Græsbøll Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalborg University, Denmark; ^^2^^Capturi, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2667–2671&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel supervised Non-negative Matrix Factorization (NMF) speech enhancement method, which is based on a Hidden Markov Model (HMM) and the Kullback-Leibler (KL) divergence (NMF-HMM). Our algorithm applies the HMM to capture timing information, so the temporal dynamics of the speech signal can be taken into account, in contrast to traditional NMF-based speech enhancement methods. More specifically, a sum of Poisson distributions, leading to the KL divergence measure, is used as the observation model for each state of the HMM. This ensures that the parameter update rule of the proposed algorithm is identical to the multiplicative update rule, which is quick and efficient. In the training stage, this update rule is applied to train the NMF-HMM model. In the online enhancement stage, a novel minimum mean-square error (MMSE) estimator that combines the NMF-HMM is proposed to conduct speech enhancement. The performance of the proposed algorithm is evaluated by the perceptual evaluation of speech quality (PESQ) and short-time objective intelligibility (STOI). The experimental results indicate that the STOI score of the proposed strategy outperforms current state-of-the-art NMF-based speech enhancement methods by 7%.</p></div>
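For reference, the multiplicative update rule for NMF under the KL divergence that the abstract refers to is the standard one sketched below; this generic version omits the HMM state conditioning of the proposed NMF-HMM.

```
# Standard multiplicative updates for NMF under the KL divergence (generic textbook
# form, not the paper's HMM-state-conditioned version).
import numpy as np

def kl_nmf(V, rank=16, n_iter=100, eps=1e-10):
    """Factorise a non-negative magnitude spectrogram V (freq x time) as W @ H."""
    rng = np.random.default_rng(0)
    F, T = V.shape
    W = rng.random((F, rank)) + eps
    H = rng.random((rank, T)) + eps
    ones = np.ones_like(V)
    for _ in range(n_iter):
        WH = W @ H + eps
        H *= (W.T @ (V / WH)) / (W.T @ ones + eps)   # update activations
        WH = W @ H + eps
        W *= ((V / WH) @ H.T) / (ones @ H.T + eps)   # update basis vectors
    return W, H

V = np.abs(np.random.default_rng(1).standard_normal((257, 200)))
W, H = kl_nmf(V)
print(W.shape, H.shape)          # (257, 16) (16, 200)
```
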
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lu Zhang|AUTHOR Lu Zhang]], [[Mingjiang Wang|AUTHOR Mingjiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2672–2676&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Capturing the temporal dependence of speech signals is of great importance for numerous speech-related tasks. This paper proposes a more effective temporal modeling method for a causal speech enhancement system. We design a forward stacked temporal convolutional network (TCN) model which exploits multi-scale temporal analysis in each residual block. This model incorporates multi-scale dilated convolutions to better track the target speech through context information from past frames. Applying multi-target learning of the log power spectrum (LPS) and the ideal ratio mask (IRM) further improves model robustness, due to the complementarity of the tasks. Experimental results show that the proposed TCN model not only achieves better speech reconstruction in terms of speech quality and speech intelligibility, but also has a smaller model size than the long short-term memory (LSTM) and gated recurrent unit (GRU) networks.</p></div>
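A hedged sketch of a causal residual block combining dilated convolutions at several scales follows; the channel counts, kernel size and dilation factors are assumptions for illustration, not the paper's configuration.

```
# Sketch of a causal residual block with multi-scale dilated convolutions, in the
# spirit of the multi-scale temporal analysis described above (sizes are assumed).
import torch
import torch.nn as nn

class MultiScaleTCNBlock(nn.Module):
    def __init__(self, channels=64, kernel_size=3, dilations=(1, 2, 4)):
        super().__init__()
        self.branches = nn.ModuleList()
        self.pads = []
        for d in dilations:
            self.pads.append((kernel_size - 1) * d)            # left padding -> causal
            self.branches.append(nn.Conv1d(channels, channels, kernel_size, dilation=d))
        self.merge = nn.Conv1d(channels * len(dilations), channels, 1)
        self.act = nn.PReLU()

    def forward(self, x):                                      # x: (batch, channels, time)
        outs = []
        for pad, conv in zip(self.pads, self.branches):
            outs.append(conv(nn.functional.pad(x, (pad, 0))))  # pad only on the past side
        return x + self.act(self.merge(torch.cat(outs, dim=1)))

block = MultiScaleTCNBlock()
print(block(torch.randn(2, 64, 100)).shape)                    # torch.Size([2, 64, 100])
```
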
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Quan Wang|AUTHOR Quan Wang]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Mert Saglam|AUTHOR Mert Saglam]], [[Kevin Wilson|AUTHOR Kevin Wilson]], [[Alan Chiao|AUTHOR Alan Chiao]], [[Renjie Liu|AUTHOR Renjie Liu]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Wei Li|AUTHOR Wei Li]], [[Jason Pelecanos|AUTHOR Jason Pelecanos]], [[Marily Nika|AUTHOR Marily Nika]], [[Alexander Gruenstein|AUTHOR Alexander Gruenstein]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2677–2681&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce VoiceFilter-Lite, a single-channel source separation model that runs on-device to preserve only the speech signals from a target user, as part of a streaming speech recognition system. Delivering such a model presents numerous challenges: it should improve performance when the input signal consists of overlapped speech, and it must not hurt speech recognition performance under any other acoustic conditions. Besides, this model must be tiny and fast, and perform inference in a streaming fashion, in order to have minimal impact on CPU, memory, battery and latency. We propose novel techniques to meet these multi-faceted requirements, including using a new asymmetric loss and adopting adaptive runtime suppression strength. We also show that such a model can be quantized as an 8-bit integer model and run in real time.</p></div>
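One way to realize an asymmetric loss of the kind mentioned above is to weight over-suppression errors more heavily than residual-noise errors, as in the generic sketch below; the exact form and weighting used in VoiceFilter-Lite may differ.

```
# Generic asymmetric spectral loss (an illustration of the idea, not necessarily the
# paper's definition): errors where the model removes energy that belongs to the
# target ("over-suppression") are weighted more heavily than residual-noise errors.
import numpy as np

def asymmetric_l2(clean_mag, enhanced_mag, alpha=10.0):
    diff = clean_mag - enhanced_mag
    weight = np.where(diff > 0, alpha, 1.0)   # diff > 0 means target energy was suppressed
    return np.mean(weight * diff ** 2)

clean = np.abs(np.random.default_rng(0).standard_normal((257, 100)))
over_suppressed = clean * 0.5                 # half the target energy removed
under_suppressed = clean * 1.5                # residual noise left in
print(asymmetric_l2(clean, over_suppressed) > asymmetric_l2(clean, under_suppressed))  # True
```
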
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziqiang Shi|AUTHOR Ziqiang Shi]]^^1^^, [[Rujie Liu|AUTHOR Rujie Liu]]^^1^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fujitsu, China; ^^2^^Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2682–2686&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks with dual-path bi-directional long short-term memory (BiLSTM) blocks have proved very effective for sequence modeling, especially for speech separation. This work investigates how to extend the dual-path BiLSTM to a new state-of-the-art approach, called TasTas, for multi-talker monaural speech separation (a.k.a. the cocktail party problem). TasTas introduces two simple but effective improvements to boost the performance of dual-path BiLSTM based networks: one is an iterative multi-stage refinement scheme, and the other is correcting imperfectly separated speech through a loss enforcing speaker identity consistency between the separated and original speech. TasTas takes the mixed utterance of two speakers and maps it to two separated utterances, where each utterance contains only one speaker’s voice. Our experiments on the notable benchmark WSJ0-2mix corpus yield 20.55 dB SDR improvement, 20.35 dB SI-SDR improvement, a PESQ of 3.69, and an ESTOI of 94.86%, which shows that our proposed networks can lead to a large performance improvement on the speaker separation task. We have open-sourced our reimplementation of DPRNN-TasNet¹, and since TasTas is realized on top of this implementation, we believe that the results in this paper can be reproduced with ease.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiang Hao|AUTHOR Xiang Hao]]^^1^^, [[Shixue Wen|AUTHOR Shixue Wen]]^^2^^, [[Xiangdong Su|AUTHOR Xiangdong Su]]^^1^^, [[Yun Liu|AUTHOR Yun Liu]]^^2^^, [[Guanglai Gao|AUTHOR Guanglai Gao]]^^1^^, [[Xiaofei Li|AUTHOR Xiaofei Li]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inner Mongolia University, China; ^^2^^Sogou, China; ^^3^^Westlake University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2687–2691&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In single-channel speech enhancement, methods based on full-band spectral features have been widely studied, while only a few methods pay attention to non-full-band spectral features. In this paper, we explore a knowledge distillation framework based on sub-band spectral mapping for single-channel speech enhancement. First, we divide the full frequency band into multiple sub-bands and pre-train an elite-level sub-band enhancement model (teacher model) for each sub-band. The teacher models are dedicated to processing their own sub-bands. Next, under the teacher models’ guidance, we train a general sub-band enhancement model (student model) that works for all sub-bands. Without increasing the number of model parameters or the computational complexity, the student model’s performance is further improved. To evaluate the proposed method, we conducted a large number of experiments on an open-source data set. The final experimental results show that the guidance from the elite-level teacher models dramatically improves the student model’s performance, which exceeds that of the full-band model while employing fewer parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sujan Kumar Roy|AUTHOR Sujan Kumar Roy]], [[Aaron Nicolson|AUTHOR Aaron Nicolson]], [[Kuldip K. Paliwal|AUTHOR Kuldip K. Paliwal]]
</p><p class="cpabstractcardaffiliationlist">Griffith University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2692–2696&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The existing Kalman filter (KF) suffers from poor estimates of the noise variance and the linear prediction coefficients (LPCs) in real-world noise conditions. This results in degraded speech enhancement performance. In this paper, a deep learning approach is used to more accurately estimate the noise variance and LPCs, enabling the KF to enhance speech in various noise conditions. Specifically, a deep learning approach to MMSE-based noise power spectral density (PSD) estimation, called DeepMMSE, is used. The estimated noise PSD is used to compute the noise variance. We also construct a whitening filter with its coefficients computed from the estimated noise PSD. It is then applied to the noisy speech, yielding pre-whitened speech for computing the LPCs. The improved noise variance and LPC estimates enable the KF to minimise the //residual// noise and //distortion// in the enhanced speech. Experimental results show that the proposed method exhibits higher quality and intelligibility in the enhanced speech than the benchmark methods in various noise conditions over a wide range of SNR levels.</p></div>
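Since the pipeline above hinges on LPCs estimated from the pre-whitened speech, the textbook Levinson-Durbin recursion for computing them is sketched here; this is the standard algorithm, not code from the paper.

```
# Standard Levinson-Durbin recursion for estimating LPCs from a (pre-whitened) frame.
import numpy as np

def lpc(frame, order=10):
    """Return the prediction polynomial a (a[0] = 1) and the final prediction error."""
    r = np.correlate(frame, frame, mode="full")[len(frame) - 1:]   # autocorrelation r[0..]
    a = np.zeros(order + 1)
    a[0] = 1.0
    err = r[0]
    for i in range(1, order + 1):
        acc = r[i] + np.dot(a[1:i], r[i - 1:0:-1])
        k = -acc / err                                  # reflection coefficient
        a[1:i + 1] = a[1:i + 1] + k * a[i - 1::-1][:i]  # update polynomial coefficients
        err *= (1.0 - k * k)
    return a, err

frame = np.random.default_rng(0).standard_normal(320)   # e.g. a 20 ms frame at 16 kHz
a, err = lpc(frame)
print(a.shape, err > 0)                                  # (11,) True
```
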
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongjiang Yu|AUTHOR Hongjiang Yu]]^^1^^, [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]]^^1^^, [[Benoit Champagne|AUTHOR Benoit Champagne]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Concordia University, Canada; ^^2^^McGill University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2697–2701&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel deep neural network (DNN) assisted subband Kalman filtering system for speech enhancement. In the off-line phase, a DNN is trained to explore the relationships between the features of the noisy subband speech and the linear prediction coefficients of the clean ones, which are the key parameters in Kalman filtering. In the on-line phase, the input noisy speech is first decomposed into subbands, and then Kalman filtering is applied to each subband for denoising. The final enhanced speech is obtained by synthesizing the enhanced subband signals. Experimental results show that our proposed system outperforms three Kalman filtering based methods in terms of both speech quality and intelligibility.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoqi Li|AUTHOR Xiaoqi Li]], [[Yaxing Li|AUTHOR Yaxing Li]], [[Yuanjie Dong|AUTHOR Yuanjie Dong]], [[Shan Xu|AUTHOR Shan Xu]], [[Zhihui Zhang|AUTHOR Zhihui Zhang]], [[Dan Wang|AUTHOR Dan Wang]], [[Shengwu Xiong|AUTHOR Shengwu Xiong]]
</p><p class="cpabstractcardaffiliationlist">WHUT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2702–2706&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement aims to reduce noise and improve the quality and intelligibility of noisy speech. Long short-term memory (LSTM) network frameworks have achieved great success in many speech enhancement applications. In this paper, the ordered neurons long short-term memory (ON-LSTM) network, with a new inductive bias that differentiates long- and short-term information in each neuron, is proposed for speech enhancement. Compared with the low-ranking neurons, which carry short-term or local information, the high-ranking neurons, which contain long-term or global information, update less frequently and have a wider range of influence. Thus, the ON-LSTM can automatically learn the clean speech information from noisy input and shows better expressive ability. We also propose a rearrangement concatenation rule to connect the ON-LSTM outputs of the forward and backward layers to construct a bidirectional ON-LSTM (Bi-ONLSTM) for further performance improvement. The experimental results reveal that the proposed ON-LSTM schemes produce better enhancement performance than the vanilla LSTM baseline. A visualization result also shows that our proposed model can effectively capture clean speech components from noisy inputs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Leanne Nortje|AUTHOR Leanne Nortje]], [[Herman Kamper|AUTHOR Herman Kamper]]
</p><p class="cpabstractcardaffiliationlist">Stellenbosch University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2712–2716&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the task of multimodal one-shot speech-image matching. An agent is shown a picture along with a spoken word describing the object in the picture, e.g. //cookie, broccoli// and //ice-cream//. After observing //one// paired speech-image example per class, it is shown a new set of unseen pictures, and asked to pick the “ice-cream”. Previous work attempted to tackle this problem using transfer learning: supervised models are trained on labelled background data not containing any of the one-shot classes. Here we compare transfer learning to unsupervised models trained on unlabelled in-domain data. On a dataset of paired isolated spoken and visual digits, we specifically compare unsupervised autoencoder-like models to supervised classifier and Siamese neural networks. In both unimodal and multimodal few-shot matching experiments, we find that transfer learning outperforms unsupervised training. We also present experiments towards combining the two methodologies, but find that transfer learning still performs best (despite idealised experiments showing the benefits of unsupervised learning).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Anuroop Sriram|AUTHOR Anuroop Sriram]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2757–2761&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces the Multilingual LibriSpeech (MLS) dataset, a large multilingual corpus suitable for speech research. The dataset is derived from read audiobooks from LibriVox and consists of 8 languages, including about 32K hours of English and a total of 4.5K hours for the other languages. We provide baseline Automatic Speech Recognition (ASR) models and Language Models (LM) for all the languages in our dataset. We believe such a large transcribed dataset will open new avenues in ASR and Text-To-Speech (TTS) research. The dataset will be made freely available to anyone at http://www.openslr.org</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yoonhyung Lee|AUTHOR Yoonhyung Lee]], [[Seunghyun Yoon|AUTHOR Seunghyun Yoon]], [[Kyomin Jung|AUTHOR Kyomin Jung]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2717–2721&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel speech emotion recognition model called Cross Attention Network (CAN) that uses aligned audio and text signals as inputs. It is inspired by the fact that humans recognize speech as a combination of simultaneously produced acoustic and textual signals. First, our method segments the audio and the underlying text signals into an equal number of steps in an aligned way, so that the same time steps of the sequential signals cover the same time span in the signals. Together with this technique, we apply cross attention to aggregate the sequential information from the aligned signals. In the cross attention, each modality is aggregated independently by applying the global attention mechanism to each modality. Then, the attention weights of each modality are applied directly to the other modality in a crossed way, so that the CAN gathers the audio and text information from the same time steps based on each modality. In experiments conducted on the standard IEMOCAP dataset, our model outperforms the state-of-the-art systems by 2.66% and 3.18% relative in terms of the weighted and unweighted accuracy.</p></div>
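The crossed application of attention weights described above can be illustrated with a small numpy sketch in which each modality's global-attention weights pool the other modality over the aligned time steps; all dimensions and the simple dot-product scorer are illustrative assumptions.

```
# Sketch of crossed attention pooling: weights computed on one modality are used to
# pool the other modality over the same (aligned) time steps. Dimensions are made up.
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

rng = np.random.default_rng(0)
T, Da, Dt = 50, 128, 300                  # aligned steps, audio dim, text dim
audio, text = rng.standard_normal((T, Da)), rng.standard_normal((T, Dt))
w_audio, w_text = rng.standard_normal(Da), rng.standard_normal(Dt)   # attention scorers

alpha_audio = softmax(audio @ w_audio)    # weights computed on the audio modality
alpha_text = softmax(text @ w_text)       # weights computed on the text modality

audio_pooled = alpha_text @ audio         # ...but applied to the *other* modality
text_pooled = alpha_audio @ text
fused = np.concatenate([audio_pooled, text_pooled])
print(fused.shape)                        # (428,)
```
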
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]
</p><p class="cpabstractcardaffiliationlist">BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2722–2726&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Articulatory-to-acoustic (forward) mapping is a technique to predict speech using various articulatory acquisition techniques (e.g. ultrasound tongue imaging, lip video). Real-time MRI (rtMRI) of the vocal tract has not been used before for this purpose. The advantage of MRI is that it has a high ‘relative’ spatial resolution: it can capture not only lingual, labial and jaw motion, but also the velum and the pharyngeal region, which is typically not possible with other techniques. In the current paper, we train various DNNs (fully connected, convolutional and recurrent neural networks) for articulatory-to-speech conversion, using rtMRI as input, in a speaker-specific way. We use two male and two female speakers of the USC-TIMIT articulatory database, each of them uttering 460 sentences. We evaluate the results with objective (Normalized MSE and MCD) and subjective measures (a perceptual test) and show that CNN-LSTM networks that take multiple images as input are preferred, achieving MCD scores between 2.8–4.5 dB. In the experiments, we find that the predictions for speaker ‘m1’ are significantly weaker than those for the other speakers. We show that this is caused by the fact that 74% of the recordings of speaker ‘m1’ are out of sync.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]^^1^^, [[Csaba Zainkó|AUTHOR Csaba Zainkó]]^^1^^, [[László Tóth|AUTHOR László Tóth]]^^2^^, [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]^^3^^, [[Alexandra Markó|AUTHOR Alexandra Markó]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BME, Hungary; ^^2^^University of Szeged, Hungary; ^^3^^University of Szeged, Hungary; ^^4^^MTA-ELTE LingArt, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2727–2731&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For articulatory-to-acoustic mapping using deep neural networks, typically spectral and excitation parameters of vocoders have been used as the training targets. However, vocoding often results in buzzy and muffled final speech quality. Therefore, in this paper on ultrasound-based articulatory-to-acoustic conversion, we use a flow-based neural vocoder (WaveGlow) pre-trained on a large amount of English and Hungarian speech data. The inputs of the convolutional neural network are ultrasound tongue images. The training target is the 80-dimensional mel-spectrogram, which results in a finer detailed spectral representation than the previously used 25-dimensional Mel-Generalized Cepstrum. From the output of the ultrasound-to-mel-spectrogram prediction, WaveGlow inference results in synthesized speech. We compare the proposed WaveGlow-based system with a continuous vocoder which does not use strict voiced/unvoiced decision when predicting F0. The results demonstrate that during the articulatory-to-acoustic mapping experiments, the WaveGlow neural vocoder produces significantly more natural synthesized speech than the baseline system. Besides, the advantage of WaveGlow is that F0 is included in the mel-spectrogram representation, and it is not necessary to predict the excitation separately.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]]
</p><p class="cpabstractcardaffiliationlist">Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2732–2736&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study addresses unsupervised subword modeling, i.e., learning feature representations that can distinguish subword units of a language. The proposed approach adopts a two-stage bottleneck feature (BNF) learning framework, consisting of autoregressive predictive coding (APC) as a front-end and a DNN-BNF model as a back-end. APC pretrained features are set as input features to a DNN-BNF model. A language-mismatched ASR system is used to provide cross-lingual phone labels for DNN-BNF model training. Finally, BNFs are extracted as the subword-discriminative feature representation. A second aim of this work is to investigate the robustness of our approach’s effectiveness to different amounts of training data. The results on Libri-light and the ZeroSpeech 2017 databases show that APC is effective in front-end feature pretraining. Our whole system outperforms the state of the art on both databases. Cross-lingual phone labels for English data by a Dutch ASR outperform those by a Mandarin ASR, possibly linked to the larger similarity of Dutch compared to Mandarin with English. Our system is less sensitive to training data amount when the training data is over 50 hours. APC pretraining leads to a reduction of needed training material from over 5,000 hours to around 200 hours with little performance degradation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kohei Matsuura|AUTHOR Kohei Matsuura]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Shinsuke Sakai|AUTHOR Shinsuke Sakai]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2737–2741&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It is important to transcribe and archive speech data of endangered languages for preserving heritages of verbal culture, and automatic speech recognition (ASR) is a powerful tool to facilitate this process. However, since endangered languages do not generally have large corpora with many speakers, the performance of ASR models trained on them is generally quite poor. Nevertheless, we are often left with many recordings of spontaneous speech data that have to be transcribed. In this work, to mitigate this speaker sparsity problem, we propose to convert the whole training speech data to make it sound like the test speaker, in order to develop a highly accurate ASR system for this speaker. For this purpose, we utilize a CycleGAN-based non-parallel voice conversion technology to forge labeled training data that is close to the test speaker’s speech. We evaluated this speaker adaptation approach on two low-resource corpora, namely Ainu and Mboshi. We obtained 35–60% relative improvement in phone error rate on the Ainu corpus, and 40% relative improvement was attained on the Mboshi corpus. This approach outperformed two conventional methods, namely unsupervised adaptation and multilingual training, on these two corpora.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kazuki Tsunematsu|AUTHOR Kazuki Tsunematsu]], [[Johanes Effendi|AUTHOR Johanes Effendi]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2742–2746&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>During a conversation, humans often predict the end of a sentence even when the other person has not finished it. In contrast, most current automatic speech recognition systems remain limited to passively recognizing what is being said. But applications like voice search, simultaneous speech translation, and spoken language communication may require a system that not only recognizes what has been said but also predicts what will be said. This paper proposes a speech completion system based on deep learning and discusses its construction in text-to-text, speech-to-text, and speech-to-speech frameworks. We evaluate our system on domain-specific sentences with synthesized speech utterances that are only 25%, 50%, or 75% complete. Our proposed systems provide more natural suggestions than the Bidirectional Encoder Representations from Transformers (BERT) language representation model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Benjamin Milde|AUTHOR Benjamin Milde]], [[Chris Biemann|AUTHOR Chris Biemann]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2747–2751&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Sparsespeech model is an unsupervised acoustic model that can generate discrete pseudo-labels for untranscribed speech. We extend the Sparsespeech model to allow for sampling over a random discrete variable, yielding pseudo-posteriorgrams. The degree of sparsity in this posteriorgram can be fully controlled after the model has been trained. We use the Gumbel-Softmax trick to approximately sample from a discrete distribution in the neural network and this allows us to train the network efficiently with standard backpropagation. The new and improved model is trained and evaluated on the Libri-Light corpus, a benchmark for ASR with limited or no supervision. The model is trained on 600h and 6000h of English read speech. We evaluate the improved model using the ABX error measure and a semi-supervised setting with 10h of transcribed speech. We observe a relative improvement of up to 31.3% on ABX error rates within speakers and 22.5% across speakers with the improved Sparsespeech model on 600h of speech data and further improvements when scaling the model to 6000h.</p></div>
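For reference, the Gumbel-Softmax trick mentioned above can be implemented in a few lines: logits are perturbed with Gumbel noise and passed through a temperature-scaled softmax, giving approximately discrete yet differentiable samples (the temperature and tensor sizes below are illustrative).

```
# Minimal Gumbel-Softmax sampling: approximately discrete samples from a categorical
# distribution given by logits, differentiable for standard backpropagation.
import torch

def gumbel_softmax_sample(logits, temperature=0.5):
    gumbel = -torch.log(-torch.log(torch.rand_like(logits) + 1e-20) + 1e-20)
    return torch.softmax((logits + gumbel) / temperature, dim=-1)

logits = torch.randn(4, 64, requires_grad=True)    # 4 frames, 64 pseudo-label classes
soft_onehot = gumbel_softmax_sample(logits)
print(soft_onehot.shape, soft_onehot.sum(dim=-1))  # rows sum to 1; gradients flow to logits
```
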
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katerina Papadimitriou|AUTHOR Katerina Papadimitriou]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]
</p><p class="cpabstractcardaffiliationlist">University of Thessaly, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2752–2756&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we address the challenging problem of sign language recognition (SLR) from videos, introducing an end-to-end deep learning approach that relies on the fusion of a number of spatio-temporal feature streams, as well as a fully convolutional encoder-decoder for prediction. Specifically, we examine the contribution of optical flow, human skeletal features, as well as appearance features of handshapes and mouthing, in conjunction with a temporal deformable convolutional attention-based encoder-decoder for SLR. To our knowledge, this is the first use in this task of a fully convolutional multi-step attention-based encoder-decoder employing temporal deformable convolutional block structures. We conduct experiments on three sign language datasets and compare our approach to existing state-of-the-art SLR methods, demonstrating its superiority.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Chen|AUTHOR Yang Chen]]^^1^^, [[Weiran Wang|AUTHOR Weiran Wang]]^^2^^, [[Chao Wang|AUTHOR Chao Wang]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Georgia Tech, USA; ^^2^^Salesforce, USA; ^^3^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2787–2791&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While deep learning based end-to-end automatic speech recognition (ASR) systems have greatly simplified modeling pipelines, they suffer from the data sparsity issue. In this work, we propose a self-training method with an end-to-end system for semi-supervised ASR. Starting from a Connectionist Temporal Classification (CTC) system trained on the supervised data, we iteratively generate pseudo-labels on a mini-batch of unsupervised utterances with the current model, and use the pseudo-labels to augment the supervised data for immediate model update. Our method retains the simplicity of end-to-end ASR systems, and can be seen as performing alternating optimization over a well-defined learning objective. We also perform empirical investigations of our method, regarding the effect of data augmentation, decoding beamsize for pseudo-label generation, and freshness of pseudo-labels. On a commonly used semi-supervised ASR setting with the Wall Street Journal (WSJ) corpus, our method gives 14.4% relative WER improvement over a carefully-trained base system with data augmentation, reducing the performance gap between the base system and the oracle system by 46%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gary Wang|AUTHOR Gary Wang]]^^1^^, [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]]^^2^^, [[Zhehuai Chen|AUTHOR Zhehuai Chen]]^^2^^, [[Yu Zhang|AUTHOR Yu Zhang]]^^2^^, [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]]^^2^^, [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Simon Fraser University, Canada; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2832–2836&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent developments in data augmentation have brought great improvements to automatic speech recognition (ASR). Parallel developments in augmentation policy search in the computer vision domain have shown improvements in model performance and robustness. In addition, recent developments in semi-supervised learning have shown that consistency measures are crucial for performance and robustness. In this work, we demonstrate that combining augmentation policies with consistency measures and model regularization can greatly improve speech recognition performance. Using the Librispeech task, we show: 1) symmetric consistency measures such as the Jensen-Shannon Divergence provide 4% relative improvement in ASR performance; 2) augmented adversarial inputs using Virtual Adversarial Noise (VAT) provide a 12% relative win; and 3) random sampling from an arbitrary combination of augmentation policies yields the best policy. These contributions result in an overall reduction in Word Error Rate (WER) of 15% relative on the Librispeech task presented in this paper.</p></div>
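A symmetric consistency term of the kind credited above with the 4% relative gain can be sketched as the Jensen-Shannon divergence between posteriors produced for two augmented views of the same utterance; the random tensors below stand in for model outputs.

```
# Jensen-Shannon divergence as a symmetric consistency loss between model outputs
# for two augmentations of the same utterance (random stand-ins for posteriors).
import torch
import torch.nn.functional as F

def jensen_shannon(p, q, eps=1e-8):
    m = 0.5 * (p + q)
    kl = lambda a, b: torch.sum(a * (torch.log(a + eps) - torch.log(b + eps)), dim=-1)
    return torch.mean(0.5 * kl(p, m) + 0.5 * kl(q, m))

view1 = F.softmax(torch.randn(8, 100, 32), dim=-1)   # posteriors for augmentation 1
view2 = F.softmax(torch.randn(8, 100, 32), dim=-1)   # posteriors for augmentation 2
consistency_loss = jensen_shannon(view1, view2)
print(float(consistency_loss) >= 0.0)                 # JSD is non-negative and symmetric
```
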
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hitesh Tulsiani|AUTHOR Hitesh Tulsiani]], [[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Harish Arsikere|AUTHOR Harish Arsikere]], [[Surabhi Punjabi|AUTHOR Surabhi Punjabi]], [[Sri Garimella|AUTHOR Sri Garimella]]
</p><p class="cpabstractcardaffiliationlist">Amazon, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2792–2796&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The speech recognition training data corresponding to digital voice assistants is dominated by wake-words. Training end-to-end (E2E) speech recognition models without careful attention to such data results in sub-optimal performance, as models prioritize learning wake-words. To address this problem, we propose a novel discriminative initialization strategy that introduces a regularization term to penalize the model for incorrectly hallucinating wake-words in the early phases of training. We also explore other training strategies, such as multi-task learning with listen-attend-spell (LAS), label smoothing via probabilistic modelling of silence, and the use of multiple pronunciations, and show how they can be combined with the proposed initialization technique. In addition, we show the connection between the cost function of the proposed discriminative initialization technique and the minimum word error rate (MWER) criterion. We evaluate our methods on two E2E ASR systems, a phone-based system and a word-piece based system, trained on 6500 hours of Alexa’s Indian English speech corpus. We show that the proposed techniques yield 20% word error rate reduction for the phone-based system and 6% for the word-piece based system compared to corresponding baselines trained on the same data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2797–2801&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes serialized output training (SOT), a novel framework for multi-speaker overlapped speech recognition based on an attention-based encoder-decoder approach. Instead of having multiple output layers as with permutation invariant training (PIT), SOT uses a model with only one output layer that generates the transcriptions of multiple speakers one after another. The attention and decoder modules take care of producing multiple transcriptions from overlapped speech. SOT has two advantages over PIT: (1) no limitation on the maximum number of speakers, and (2) the ability to model dependencies among the outputs for different speakers. We also propose a simple trick that allows SOT to be executed in O(S), where S is the number of speakers in the training sample, by using the start times of the constituent source utterances. Experimental results on the LibriSpeech corpus show that SOT models can transcribe overlapped speech with variable numbers of speakers significantly better than PIT-based models. We also show that SOT models can accurately count the number of speakers in the input audio.</p></div>
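The serialization step at the heart of SOT can be illustrated as follows: reference transcriptions are ordered by utterance start time and joined with a speaker-change token; the token name used below is an assumed placeholder for illustration.

```
# Sketch of serializing reference labels by utterance start time for a single output
# layer, with an assumed speaker-change token between speakers.
def serialize_references(utterances, sc_token="<sc>"):
    """utterances: list of (start_time_seconds, transcription)."""
    ordered = sorted(utterances, key=lambda u: u[0])
    return f" {sc_token} ".join(text for _, text in ordered)

mixture_refs = [(1.3, "how are you"), (0.2, "hello there"), (2.8, "fine thanks")]
print(serialize_references(mixture_refs))
# hello there <sc> how are you <sc> fine thanks
```
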
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Weninger|AUTHOR Felix Weninger]]^^1^^, [[Franco Mana|AUTHOR Franco Mana]]^^2^^, [[Roberto Gemello|AUTHOR Roberto Gemello]]^^2^^, [[Jesús Andrés-Ferrer|AUTHOR Jesús Andrés-Ferrer]]^^3^^, [[Puming Zhan|AUTHOR Puming Zhan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nuance Communications, USA; ^^2^^Nuance Communications, Italy; ^^3^^Nuance Communications, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2802–2806&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we apply Semi-Supervised Learning (SSL) along with Data Augmentation (DA) to improve the accuracy of End-to-End ASR. We focus on the consistency regularization principle, which has been successfully applied to image classification tasks, and present sequence-to-sequence (seq2seq) versions of the FixMatch and Noisy Student algorithms. Specifically, we generate the pseudo labels for the unlabeled data on-the-fly with a seq2seq model after perturbing the input features with DA. We also propose soft label variants of both algorithms to cope with pseudo label errors, showing further performance improvements. We conduct SSL experiments on a conversational speech data set (doctor-patient conversations) with 1.9 kh of manually transcribed training data, using only 25% of the original labels (475 h of labeled data). As a result, the Noisy Student algorithm with soft labels and consistency regularization achieves a 10.4% word error rate (WER) reduction when adding 475 h of unlabeled data, corresponding to a recovery rate of 92%. Furthermore, when iteratively adding 950 h more unlabeled data, our best SSL performance is within a 5% WER increase compared to using the full labeled training set (recovery rate: 78%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinxi Guo|AUTHOR Jinxi Guo]]^^1^^, [[Gautam Tiwari|AUTHOR Gautam Tiwari]]^^2^^, [[Jasha Droppo|AUTHOR Jasha Droppo]]^^2^^, [[Maarten Van Segbroeck|AUTHOR Maarten Van Segbroeck]]^^2^^, [[Che-Wei Huang|AUTHOR Che-Wei Huang]]^^2^^, [[Andreas Stolcke|AUTHOR Andreas Stolcke]]^^2^^, [[Roland Maas|AUTHOR Roland Maas]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2807–2811&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose a novel and efficient minimum word error rate (MWER) training method for the RNN-Transducer (RNN-T). Unlike previous work on this topic, which performs on-the-fly limited-size beam-search decoding and generates alignment scores for expected edit-distance computation, in our proposed method we re-calculate and sum the scores of all possible alignments for each hypothesis in the N-best list. The hypothesis probability scores and back-propagated gradients are calculated efficiently using the forward-backward algorithm. Moreover, the proposed method allows us to decouple the decoding and training processes, so we can perform offline parallel decoding and MWER training for each subset iteratively. Experimental results show that the proposed semi-on-the-fly method can speed up the on-the-fly method by 6 times and results in a similar WER improvement (3.6%) over a baseline RNN-T model. The proposed MWER training can also effectively reduce the high-deletion errors (9.2% WER reduction) introduced by RNN-T models when EOS is added for the end-pointer. Further improvement can be achieved if we use the proposed RNN-T rescoring method to re-rank hypotheses and use an external RNN-LM to perform additional rescoring. The best system achieves a 5% relative improvement on an English test set of real far-field recordings and an 11.6% WER reduction on music-domain utterances.</p></div>
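The expected-word-error objective over an N-best list can be sketched as below: hypothesis scores are renormalized into a distribution and weighted by baseline-subtracted edit distances to the reference. The toy scores and texts are illustrative, and the actual method sums the scores of all alignments per hypothesis as described above.

```
# Sketch of an MWER-style objective over an N-best list: renormalized hypothesis
# probabilities weighted by (baseline-subtracted) word-level edit distances.
import torch

def edit_distance(a, b):
    dp = list(range(len(b) + 1))
    for i, x in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, y in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (x != y))
    return dp[-1]

def mwer_loss(log_probs, hyps, ref):
    probs = torch.softmax(log_probs, dim=0)                  # renormalize over the N-best
    errors = torch.tensor([float(edit_distance(h.split(), ref.split())) for h in hyps])
    return torch.sum(probs * (errors - errors.mean()))       # mean as variance-reducing baseline

log_probs = torch.tensor([-1.2, -0.7, -2.5], requires_grad=True)   # summed alignment scores
hyps = ["turn on the light", "turn of the light", "turn on the lights"]
loss = mwer_loss(log_probs, hyps, "turn on the light")
loss.backward()                                              # gradient favours the 0-error hypothesis
print(float(loss))
```
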
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Albert Zeyer|AUTHOR Albert Zeyer]], [[André Merboldt|AUTHOR André Merboldt]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2812–2816&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The //RNN transducer// is a promising end-to-end model candidate. We compare the original training criterion with the full marginalization over all alignments, to the commonly used maximum approximation, which simplifies, improves and speeds up our training. We also generalize from the original neural network model and study more powerful models, made possible due to the maximum approximation. We further generalize the output label topology to cover RNN-T, RNA and CTC. We perform several studies among all these aspects, including a study on the effect of external alignments. We find that the transducer model generalizes much better on longer sequences than the attention model. Our final transducer model outperforms our attention model on Switchboard 300h by over 6% relative WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel S. Park|AUTHOR Daniel S. Park]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Ye Jia|AUTHOR Ye Jia]], [[Wei Han|AUTHOR Wei Han]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Bo Li|AUTHOR Bo Li]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Quoc V. Le|AUTHOR Quoc V. Le]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2817–2821&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, a semi-supervised learning method known as “noisy student training” has been shown to improve image classification performance of deep networks significantly. Noisy student training is an iterative self-training method that leverages augmentation to improve network performance. In this work, we adapt and improve noisy student training for automatic speech recognition, employing (adaptive) SpecAugment as the augmentation method. We find effective methods to filter, balance and augment the data generated in between self-training iterations. By doing so, we are able to obtain word error rates (WERs) 4.2%/8.6% on the clean/noisy LibriSpeech test sets by only using the clean 100h subset of LibriSpeech as the supervised set and the rest (860h) as the unlabeled set. Furthermore, we are able to achieve WERs 1.7%/3.4% on the clean/noisy LibriSpeech test sets by using the unlab-60k subset of LibriLight as the unlabeled set for LibriSpeech 960h. We are thus able to improve upon the previous state-of-the-art clean/noisy test WERs achieved on LibriSpeech 100h (4.74%/12.20%) and LibriSpeech (1.9%/4.1%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryo Masumura|AUTHOR Ryo Masumura]]^^1^^, [[Naoki Makishima|AUTHOR Naoki Makishima]]^^1^^, [[Mana Ihori|AUTHOR Mana Ihori]]^^1^^, [[Akihiko Takashima|AUTHOR Akihiko Takashima]]^^1^^, [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]]^^2^^, [[Shota Orihashi|AUTHOR Shota Orihashi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2822–2826&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a simple and efficient pre-training method using a large number of external texts to enhance end-to-end automatic speech recognition (ASR). Generally, it is essential to prepare speech-to-text paired data to construct end-to-end ASR models, but it is difficult to collect a large amount of such data in practice. One issue caused by data scarcity is that the performance of ASR on out-of-domain tasks different from those using the speech-to-text paired data is poor, since the mapping from the speech information to textual information is not well learned. To address this problem, we leverage a large number of phoneme-to-grapheme (P2G) paired data, which can be easily created from external texts and a rich pronunciation dictionary. The P2G conversion and end-to-end ASR are regarded as similar transformation tasks where the input phonetic information is converted into textual information. Our method utilizes the P2G conversion task for pre-training of a decoder network in Transformer encoder-decoder based end-to-end ASR. Experiments using 4 billion tokens of Web text demonstrates that the performance of ASR on out-of-domain tasks can be significantly improved by our pre-training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]]^^1^^, [[Ankur Kumar|AUTHOR Ankur Kumar]]^^2^^, [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]]^^1^^, [[Hejung Yang|AUTHOR Hejung Yang]]^^1^^, [[Abhinav Garg|AUTHOR Abhinav Garg]]^^1^^, [[Sachin Singh|AUTHOR Sachin Singh]]^^2^^, [[Jiyeon Kim|AUTHOR Jiyeon Kim]]^^1^^, [[Mehul Kumar|AUTHOR Mehul Kumar]]^^1^^, [[Sichen Jin|AUTHOR Sichen Jin]]^^1^^, [[Shatrughan Singh|AUTHOR Shatrughan Singh]]^^3^^, [[Chanwoo Kim|AUTHOR Chanwoo Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, Korea; ^^2^^Samsung, India; ^^3^^Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2827–2831&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose an utterance invariant training (UIT) specifically designed to improve the performance of a two-pass end-to-end hybrid ASR. Our proposed hybrid ASR solution uses a shared encoder with a monotonic chunkwise attention (MoChA) decoder for streaming capabilities, while using a low-latency bidirectional full-attention (BFA) decoder for enhancing the overall ASR accuracy. A modified sequence summary network (SSN) based utterance invariant training is used to suit the two-pass model architecture. The input feature stream self-conditioned by scaling and shifting with its own sequence summary is used as a concatenative conditioning on the bidirectional encoder layers sitting on top of the shared encoder. In effect, the proposed utterance invariant training combines three different types of conditioning namely, concatenative, multiplicative and additive. Experimental results show that the proposed approach shows reduction in word error rates up to 7% relative on Librispeech, and 10–15% on a large scale Korean end-to-end two-pass hybrid ASR model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sneha Das|AUTHOR Sneha Das]]^^1^^, [[Tom Bäckström|AUTHOR Tom Bäckström]]^^1^^, [[Guillaume Fuchs|AUTHOR Guillaume Fuchs]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^Fraunhofer IIS, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2837–2841&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech codecs can use postfilters to improve the quality of the decoded signal. While postfiltering is effective in reducing coding artifacts, such methods often involve processing in both the encoder and the decoder, rely on additional transmitted side information, or are highly dependent on other codec functions for optimal performance. We propose a low-complexity postfiltering method to improve the harmonic structure of the decoded signal, which models the fundamental frequency of the signal. In contrast to past approaches, the postfilter operates at the decoder as a standalone function and does not need the transmission of additional side information. It can thus be used to enhance the output of any codec. We tested the approach on a modified version of the EVS codec in TCX mode only, which is subject to more pronounced coding artefacts when used at its lowest bitrate. Listening test results show an average improvement of 7 MUSHRA points for decoded signals with the proposed harmonic postfilter.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Arthur Van Den Broucke|AUTHOR Arthur Van Den Broucke]]^^1^^, [[Deepak Baby|AUTHOR Deepak Baby]]^^2^^, [[Sarah Verhulst|AUTHOR Sarah Verhulst]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ghent University, Belgium; ^^2^^Idiap Research Institute, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2842–2846&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Biophysically realistic models of the cochlea are based on cascaded transmission-line (TL) models which capture longitudinal coupling, cochlear nonlinearities, as well as the human frequency selectivity. However, these models are slow to compute (order of seconds/minutes) while machine-hearing and hearing-aid applications require a real-time solution. Consequently, real-time applications often adopt more basic and less time-consuming descriptions of cochlear processing (gamma-tone, dual resonance nonlinear) even though there are clear advantages in using more biophysically correct models. To overcome this, we recently combined nonlinear Deep Neural Networks (DNN) with analytical TL cochlear model descriptions to build a real-time model of cochlear processing which captures the biophysical properties associated with the TL model. In this work, we aim to extend the normal-hearing DNN-based cochlear model (CoNNear) to simulate frequency-specific patterns of hearing sensitivity loss, yielding a set of normal and hearing-impaired auditory models which can be computed in real-time and are differentiable. They can hence be used in backpropagation networks to develop the next generation of hearing-aid and machine hearing applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Skoglund|AUTHOR Jan Skoglund]]^^1^^, [[Jean-Marc Valin|AUTHOR Jean-Marc Valin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2847–2851&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The voice mode of the Opus audio coder can compress wideband speech at bit rates ranging from 6 kb/s to 40 kb/s. However, Opus is at its core a waveform matching coder, and as the rate drops below 10 kb/s, quality degrades quickly. As the rate reduces even further, parametric coders tend to perform better than waveform coders. In this paper we propose a backward-compatible way of improving low bit rate Opus quality by resynthesizing speech from the decoded parameters. We compare two different neural generative models, WaveNet and LPCNet. WaveNet is a powerful, high-complexity, and high-latency architecture that is not feasible for a practical system, yet provides a best known achievable quality with generative models. LPCNet is a low-complexity, low-latency RNN-based generative model, and practically implementable on mobile phones. We apply these systems with parameters from Opus coded at 6 kb/s as conditioning features for the generative models. A listening test shows that for the same 6 kb/s Opus bit stream, synthesized speech using LPCNet clearly outperforms the output of the standard Opus decoder. This opens up ways to improve the decoding quality of existing speech and audio waveform coders without breaking compatibility.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pranay Manocha|AUTHOR Pranay Manocha]]^^1^^, [[Adam Finkelstein|AUTHOR Adam Finkelstein]]^^1^^, [[Richard Zhang|AUTHOR Richard Zhang]]^^2^^, [[Nicholas J. Bryan|AUTHOR Nicholas J. Bryan]]^^2^^, [[Gautham J. Mysore|AUTHOR Gautham J. Mysore]]^^2^^, [[Zeyu Jin|AUTHOR Zeyu Jin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Princeton University, USA; ^^2^^Adobe, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2852–2856&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many audio processing tasks require perceptual assessment. The “gold standard” of obtaining human judgments is time-consuming, expensive, and cannot be used as an optimization criterion. On the other hand, automated metrics are efficient to compute but often correlate poorly with human judgment, particularly for audio differences at the threshold of human detection. In this work, we construct a metric by fitting a deep neural network to a new large dataset of crowdsourced human judgments. Subjects are prompted to answer a straightforward, objective question: are two recordings identical or not? These pairs are algorithmically generated under a variety of perturbations, including noise, reverb, and compression artifacts; the perturbation space is probed with the goal of efficiently identifying the just-noticeable difference (JND) level of the subject. We show that the resulting learned metric is well-calibrated with human judgments, outperforming baseline methods. Since it is a deep network, the metric is differentiable, making it suitable as a loss function for other tasks. Thus, simply replacing an existing loss (e.g., deep feature loss) with our metric yields significant improvement in a denoising network, as measured by subjective pairwise comparison.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Piotr Masztalski|AUTHOR Piotr Masztalski]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]], [[Karol Piaskowski|AUTHOR Karol Piaskowski]], [[Michal Romaniuk|AUTHOR Michal Romaniuk]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2857–2861&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we introduce StoRIR — a stochastic room impulse response generation method dedicated to audio data augmentation in machine learning applications. This technique, in contrary to geometrical methods like image-source or ray tracing, does not require prior definition of room geometry, absorption coefficients or microphone and source placement and is dependent solely on the acoustic parameters of the room. The method is intuitive, easy to implement and allows to generate RIRs of very complicated enclosures. We show that StoRIR, when used for audio data augmentation in a speech enhancement task, allows deep learning models to achieve better results on a wide range of metrics than when using the conventional image-source method, effectively improving many of them by more than 5%. We publish a Python implementation of StoRIR online¹.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Babak Naderi|AUTHOR Babak Naderi]]^^1^^, [[Ross Cutler|AUTHOR Ross Cutler]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Berlin, Germany; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2862–2866&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ITU-T Recommendation P.808 provides a crowdsourcing approach for conducting a subjective assessment of speech quality using the Absolute Category Rating (ACR) method. We provide an open-source implementation of the ITU-T Rec. P.808 that runs on the Amazon Mechanical Turk platform. We extended our implementation to include Degradation Category Ratings (DCR) and Comparison Category Ratings (CCR) test methods. We also significantly speed up the test process by integrating the participant qualification step into the main rating task compared to a two-stage qualification and rating solution. We provide program scripts for creating and executing the subjective test, and data cleansing and analyzing the answers to avoid operational errors. To validate the implementation, we compare the Mean Opinion Scores (MOS) collected through our implementation with MOS values from a standard laboratory experiment conducted based on the ITU-T Rec. P.800. We also evaluate the reproducibility of the result of the subjective speech quality assessment through crowdsourcing using our implementation. Finally, we quantify the impact of parts of the system designed to improve the reliability: environmental tests, gold and trapping questions, rating patterns, and a headset usage test.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gabriel Mittag|AUTHOR Gabriel Mittag]]^^1^^, [[Ross Cutler|AUTHOR Ross Cutler]]^^2^^, [[Yasaman Hosseinkashi|AUTHOR Yasaman Hosseinkashi]]^^2^^, [[Michael Revow|AUTHOR Michael Revow]]^^2^^, [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]]^^2^^, [[Naglakshmi Chande|AUTHOR Naglakshmi Chande]]^^2^^, [[Robert Aichner|AUTHOR Robert Aichner]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Berlin, Germany; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2867–2871&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Classic public switched telephone networks (PSTN) are often a black box for VoIP network providers, as they have no access to performance indicators, such as delay or packet loss. Only the degraded output speech signal can be used to monitor the speech quality of these networks. However, the current state-of-the-art speech quality models are not reliable enough to be used for live monitoring. One of the reasons for this is that PSTN distortions can be unique depending on the provider and country, which makes it difficult to train a model that generalizes well for different PSTN networks. In this paper, we present a new open-source PSTN speech quality test set with over 1000 crowdsourced real phone calls. Our proposed no-reference model outperforms the full-reference POLQA and no-reference P.563 on the validation and test set. Further, we analyzed the influence of file cropping on the perceived speech quality and the influence of the number of ratings and training size on the model accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sebastian Möller|AUTHOR Sebastian Möller]]^^1^^, [[Tobias Hübschen|AUTHOR Tobias Hübschen]]^^2^^, [[Thilo Michael|AUTHOR Thilo Michael]]^^1^^, [[Gabriel Mittag|AUTHOR Gabriel Mittag]]^^1^^, [[Gerhard Schmidt|AUTHOR Gerhard Schmidt]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Berlin, Germany; ^^2^^Christian-Albrechts-Universität zu Kiel, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2872–2876&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the advent of speech communication systems transmitting the full audible frequency band (0–20,000 Hz), traditional approaches for narrowband (300–3,400 Hz) speech quality estimation, service planning and monitoring come to their limits. Recently, signal-based as well as parametric tools have been developed for fullband speech quality prediction. These tools estimate overall quality, but do not provide diagnostic information about the technical causes of degradations. In the present paper, we evaluate approaches for diagnostically monitoring the quality of super-wideband and fullband speech communication services. The aim is, first, to estimate technical causes of degradations from the degraded output signals, and, second, to combine the estimated causes with parametric quality prediction models to obtain a quantitative diagnostic picture of the quality-degrading aspects. We evaluate approaches for non-intrusively identifying coding schemes and packet-loss, and compare estimated quality to the predictions of an intrusive signal-based model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Binghuai Lin|AUTHOR Binghuai Lin]]^^1^^, [[Liyuan Wang|AUTHOR Liyuan Wang]]^^1^^, [[Xiaoli Feng|AUTHOR Xiaoli Feng]]^^2^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, China; ^^2^^BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3022–3026&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic pronunciation assessment and error detection play an important part of Computer-Assisted Pronunciation Training (CAPT). Traditional approaches normally focus on scoring of sentences, words or mispronunciation detection of phonemes independently without considering the hierarchical and contextual relationships among them. In this paper, we develop a hierarchical network which combines scoring at the granularity of phoneme, word and sentence jointly. Specifically, we achieve the phoneme scores by a semi-supervised phoneme mispronunciation detection method, the words scores by an attention mechanism, and the sentence scores by a non-linear regression method. To further model the correlation between the sentence and phoneme, we optimize the network by a multitask learning framework (MTL). The proposed framework relies on a few sentence-level labeled data and a majority of unlabeled data. We evaluate the network performance on a multi-granular dataset consisting of sentences, words and phonemes which was recorded by 1,000 Chinese speakers and labeled by three experts. Experimental results show that the proposed method is well correlated with human raters with a Pearson correlation coefficient (PCC) of 0.88 at sentence level and 0.77 at word level. Furthermore, the semi-supervised phoneme mispronunciation detection achieves a comparable result by F1-measure with our supervised baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tien-Hong Lo|AUTHOR Tien-Hong Lo]], [[Shi-Yan Weng|AUTHOR Shi-Yan Weng]], [[Hsiu-Jui Chang|AUTHOR Hsiu-Jui Chang]], [[Berlin Chen|AUTHOR Berlin Chen]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan Normal University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3027–3031&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, end-to-end (E2E) automatic speech recognition (ASR) systems have garnered tremendous attention because of their great success and unified modeling paradigms in comparison to conventional hybrid DNN-HMM ASR systems. Despite the widespread adoption of E2E modeling frameworks on ASR, there still is a dearth of work on investigating the E2E frameworks for use in computer-assisted pronunciation learning (CAPT), particularly for mispronunciation detection (MD). In response, we first present a novel use of hybrid CTC-Attention approach to the MD task, taking advantage of the strengths of both CTC and the attention-based model meanwhile getting around the need for phone-level forced-alignment. Second, we perform input augmentation with text prompt information to make the resulting E2E model more tailored for the MD task. On the other hand, we adopt two MD decision methods so as to better cooperate with the proposed framework: 1) decision-making based on a recognition confidence measure or 2) simply based on speech recognition results. A series of Mandarin MD experiments demonstrate that our approach not only simplifies the processing pipeline of existing hybrid DNN-HMM systems but also brings about systematic and substantial performance improvements. Furthermore, input augmentation with text prompts seems to hold excellent promise for the E2E-based MD approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bi-Cheng Yan|AUTHOR Bi-Cheng Yan]]^^1^^, [[Meng-Che Wu|AUTHOR Meng-Che Wu]]^^2^^, [[Hsiao-Tsung Hung|AUTHOR Hsiao-Tsung Hung]]^^2^^, [[Berlin Chen|AUTHOR Berlin Chen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan Normal University; ^^2^^ASUS</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3032–3036&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Mispronunciation detection and diagnosis (MDD) is a core component of computer-assisted pronunciation training (CAPT). Most of the existing MDD approaches focus on dealing with categorical errors (viz. one canonical phone is substituted by another one, aside from those mispronunciations caused by deletions or insertions). However, accurate detection and diagnosis of non-categorial or distortion errors (viz. approximating L2 phones with L1 (first-language) phones, or erroneous pronunciations in between) still seems out of reach. In view of this, we propose to conduct MDD with a novel end-to-end automatic speech recognition (E2E-based ASR) approach. In particular, we expand the original L2 phone set with their corresponding anti-phone set, making the E2E-based MDD approach have a better capability to take in both categorical and non-categorial mispronunciations, aiming to provide better mispronunciation detection and diagnosis feedback. Furthermore, a novel transfer-learning paradigm is devised to obtain the initial model estimate of the E2E-based MDD system without resource to any phonological rules. Extensive sets of experimental results on the L2-ARCTIC dataset show that our best system can outperform the existing E2E baseline system and pronunciation scoring based method (GOP) in terms of the F1-score, by 11.05% and 27.71%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Richeng Duan|AUTHOR Richeng Duan]], [[Nancy F. Chen|AUTHOR Nancy F. Chen]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3037–3041&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Processing children’s speech is challenging due to high speaker variability arising from vocal tract size and scarce amounts of publicly available linguistic resources. In this work, we tackle such challenges by proposing an unsupervised feature adaptation approach based on adversarial multi-task training in a neural framework. A front-end feature transformation module is positioned prior to an acoustic model trained on adult speech (1) to leverage on the readily available linguistic resources on adult speech or existing models, and (2) to reduce the acoustic mismatch between child and adult speech. Experimental results demonstrate that our proposed approach consistently outperforms established baselines trained on adult speech across a variety of tasks ranging from speech recognition to pronunciation assessment and fluency score prediction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Longfei Yang|AUTHOR Longfei Yang]]^^1^^, [[Kaiqi Fu|AUTHOR Kaiqi Fu]]^^2^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^2^^, [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tokyo Tech, Japan; ^^2^^BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3042–3046&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pronunciation erroneous tendencies (PETs) are designed to provide instructive feedback to guide non-native language learners to correct their pronunciation errors in language learning thus PET detection plays an important role in computer-aided pronunciation training (CAPT) system. However, PET detection suffers data sparsity problem because non-native data collection and annotation are time-consuming tasks. In this paper, we propose an unsupervised learning framework based on contrastive predictive coding (CPC) to extract knowledge from a large scale of unlabeled speech from two native languages, and then transfer this knowledge to the PET detection task. In this framework, language adversarial training is incorporated to guide the model to align the feature distribution between two languages. In addition, sinc filter is introduced to extract formant-like feature that is considered relevant to some kinds of pronunciation errors. Through the experiment on the Japanese part of BLCU inter-Chinese speech corpus, results show that our proposed language adversarial represent learning is effective to improve the performance of pronunciation erroneous tendency detection for non-native language learners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sitong Cheng|AUTHOR Sitong Cheng]], [[Zhixin Liu|AUTHOR Zhixin Liu]], [[Lantian Li|AUTHOR Lantian Li]], [[Zhiyuan Tang|AUTHOR Zhiyuan Tang]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3047–3051&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most of the pronunciation assessment methods are based on local features derived from automatic speech recognition (ASR), e.g., the Goodness of Pronunciation (GOP) score. In this paper, we investigate an ASR-free scoring approach that is derived from the marginal distribution of raw speech signals. The hypothesis is that even if we have no knowledge of the language (so cannot recognize the phones/words), we can still tell how good a pronunciation is, by comparatively listening to some speech data from the target language. Our analysis shows that this new scoring approach provides an interesting correction for the phone-competition problem of GOP. Experimental results on the ERJ dataset demonstrated that combining the ASR-free score and GOP can achieve better performance than the GOP baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Konstantinos Kyriakopoulos|AUTHOR Konstantinos Kyriakopoulos]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3052–3056&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Detecting individual pronunciation errors and diagnosing pronunciation error tendencies in a language learner based on their speech are important components of computer-aided language learning (CALL). The tasks of error detection and error tendency diagnosis become particularly challenging when the speech in question is spontaneous and particularly given the challenges posed by the inconsistency of human annotation of pronunciation errors. This paper presents an approach to these tasks by distinguishing between lexical errors, wherein the speaker does not know how a particular word is pronounced, and accent errors, wherein the candidate’s speech exhibits consistent patterns of phone substitution, deletion and insertion. Three annotated corpora of non-native English speech by speakers of multiple L1s are analysed, the consistency of human annotation investigated and a method presented for detecting individual accent and lexical errors and diagnosing accent error tendencies at the speaker level.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiatong Shi|AUTHOR Jiatong Shi]]^^1^^, [[Nan Huo|AUTHOR Nan Huo]]^^1^^, [[Qin Jin|AUTHOR Qin Jin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^RUC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3057–3061&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Mispronunciation detection is an essential component of the Computer-Assisted Pronunciation Training (CAPT) systems. State-of-the-art mispronunciation detection models use Deep Neural Networks (DNN) for acoustic modeling, and a Goodness of Pronunciation (GOP) based algorithm for pronunciation scoring. However, GOP based scoring models have two major limitations: i.e., (i) They depend on forced alignment which splits the speech into phonetic segments and independently use them for scoring, which neglects the transitions between phonemes within the segment; (ii) They only focus on phonetic segments, which fails to consider the context effects across phonemes (such as liaison, omission, incomplete plosive sound, etc.). In this work, we propose the Context-aware Goodness of Pronunciation (CaGOP) scoring model. Particularly, two factors namely the transition factor and the duration factor are injected into CaGOP scoring. The transition factor identifies the transitions between phonemes and applies them to weight the frame-wise GOP. Moreover, a self-attention based phonetic duration modeling is proposed to introduce the duration factor into the scoring model. The proposed scoring model significantly outperforms baselines, achieving 20% and 12% relative improvement over the GOP model on the phoneme-level and sentence-level mispronunciation detection respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Chu|AUTHOR Wei Chu]]^^1^^, [[Yang Liu|AUTHOR Yang Liu]]^^2^^, [[Jianwei Zhou|AUTHOR Jianwei Zhou]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^PAII, USA; ^^2^^Amazon, USA; ^^3^^LAIX, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3062–3066&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposed a procedure for detecting and recognizing mispronunciations in training data, and improved non-native acoustic modeling by training with the corrected phone alignments. To start, an initial phone sequence for an utterance is derived from its word-level transcription and a dictionary of canonical pronunciation. Following that, the region of mispronunciation is detected through examining phone-level goodness-of-pronunciation (GOP) scores. Then over the region, a constrained phone decoder is used to recognize the most likely pronounced phone sequence from all the possible phone sequences with one phone edit distance from the initial phone sequence. After updating the phone alignments and GOP scores, this detection and recognition procedure is repeated until no more mispronunciation is detected. Experiments on a 300-hour non-native spontaneous dataset showed that the acoustic model trained from the proposed procedure reduced WER by 6% compared to a well optimized context-dependent factorized-TDNN HMM baseline system with the same neural network topology. This work also offered a data-driven approach for generating a list of common mispronunciation patterns of non-native English learners that may be useful for speech assessment purpose.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Hsi-Wei Hsieh|AUTHOR Hsi-Wei Hsieh]], [[Nicolas Charon|AUTHOR Nicolas Charon]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3391–3395&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a novel method for emotion conversion in speech based on a chained encoder-decoder-predictor neural network architecture. The encoder constructs a latent embedding of the fundamental frequency (F0) contour and the spectrum, which we regularize using the Large Diffeomorphic Metric Mapping (LDDMM) registration framework. The decoder uses this embedding to predict the modified F0 contour in a target emotional class. Finally, the predictor uses the original spectrum and the modified F0 contour to generate a corresponding target spectrum. Our joint objective function simultaneously optimizes the parameters of three model blocks. We show that our method outperforms the existing state-of-the-art approaches on both, the saliency of emotion conversion and the quality of resynthesized speech. In addition, the LDDMM regularization allows our model to convert phrases that were not present in training, thus providing evidence for out-of-sample generalization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fengyu Yang|AUTHOR Fengyu Yang]]^^1^^, [[Shan Yang|AUTHOR Shan Yang]]^^1^^, [[Qinghua Wu|AUTHOR Qinghua Wu]]^^2^^, [[Yujun Wang|AUTHOR Yujun Wang]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Xiaomi, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3436–3440&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based seq2seq text-to-speech systems, especially those use self-attention networks (SAN), have achieved state-of-art performance. But an expressive corpus with rich prosody is still challenging to model as 1) prosodic aspects, which span across different sentential granularities and mainly determine acoustic expressiveness, are difficult to quantize and label and 2) the current seq2seq framework extracts prosodic information solely from a text encoder, which is easily collapsed to an averaged expression for expressive contents. In this paper, we propose a context extractor, which is built upon SAN-based text encoder, to sufficiently exploit the sentential context over an expressive corpus for seq2seq-based TTS. Our context extractor first collects prosodic-related sentential context information from different SAN layers and then aggregates them to learn a comprehensive sentence representation to enhance the expressiveness of the final generated speech. Specifically, we investigate two methods of context aggregation: 1) //direct aggregation// which directly concatenates the outputs of different SAN layers, and 2) //weighted aggregation// which uses multi-head attention to automatically learn contributions for different SAN layers. Experiments on two expressive corpora show that our approach can produce more natural speech with much richer prosodic variations, and weighted aggregation is more superior in modeling expressivity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yukiya Hono|AUTHOR Yukiya Hono]]^^1^^, [[Kazuna Tsuboi|AUTHOR Kazuna Tsuboi]]^^2^^, [[Kei Sawada|AUTHOR Kei Sawada]]^^2^^, [[Kei Hashimoto|AUTHOR Kei Hashimoto]]^^1^^, [[Keiichiro Oura|AUTHOR Keiichiro Oura]]^^1^^, [[Yoshihiko Nankaku|AUTHOR Yoshihiko Nankaku]]^^1^^, [[Keiichi Tokuda|AUTHOR Keiichi Tokuda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya Institute of Technology, Japan; ^^2^^Microsoft, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3441–3445&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a hierarchical generative model with a multi-grained latent variable to synthesize expressive speech. In recent years, fine-grained latent variables are introduced into the text-to-speech synthesis that enable the fine control of the prosody and speaking styles of synthesized speech. However, the naturalness of speech degrades when these latent variables are obtained by sampling from the standard Gaussian prior. To solve this problem, we propose a novel framework for modeling the fine-grained latent variables, considering the dependence on an input text, a hierarchical linguistic structure, and a temporal structure of latent variables. This framework consists of a multi-grained variational autoencoder, a conditional prior, and a multi-level auto-regressive latent converter to obtain the different time-resolution latent variables and sample the finer-level latent variables from the coarser-level ones by taking into account the input text. Experimental results indicate an appropriate method of sampling fine-grained latent variables without the reference signal at the synthesis stage. Our proposed framework also provides the controllability of speaking style in an entire utterance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Kenichi Kumanati|AUTHOR Kenichi Kumanati]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3446–3450&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose a GAN-based method to generate synthetic data for speech emotion recognition. Specifically, we investigate the usage of GANs for capturing the data manifold when the data is //eyes-off//, i.e., where we can train networks using the data but cannot copy it from the clients. We propose a CNN-based GAN with spectral normalization on both the generator and discriminator, both of which are pre-trained on large unlabeled speech corpora. We show that our method provides better speech emotion recognition performance than a strong baseline. Furthermore, we show that even after the data on the client is lost, our model can generate similar data that can be used for model bootstrapping in the future. Although we evaluated our method for speech emotion recognition, it can be applied to other tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hira Dhamyal|AUTHOR Hira Dhamyal]], [[Shahan Ali Memon|AUTHOR Shahan Ali Memon]], [[Bhiksha Raj|AUTHOR Bhiksha Raj]], [[Rita Singh|AUTHOR Rita Singh]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3451–3455&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>//Can vocal emotions be emulated?// This question has been a recurrent concern of the speech community, and has also been vigorously investigated. It has been fueled further by its link to the issue of validity of acted emotion databases. Much of the speech and vocal emotion research has relied on acted emotion databases as valid //proxies// for studying natural emotions. To create models that generalize to natural settings, it is crucial to work with //valid// prototypes — ones that can be assumed to reliably represent natural emotions. More concretely, it is important to study emulated emotions against natural emotions in terms of their physiological, and psychological concomitants. In this paper, we present an on-scale systematic study of the differences between natural and acted vocal emotions. We use a self-attention based emotion classification model to understand the phonetic bases of emotions by discovering the most //‘attended’// phonemes for each class of emotions. We then compare these attended-phonemes in their importance and distribution across acted and natural classes. Our tests show significant differences in the manner and choice of phonemes in acted and natural speech, concluding moderate to low validity and value in using acted speech databases for emotion classification tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Sager|AUTHOR Jacob Sager]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3396–3400&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce a novel method for emotion conversion in speech that does not require parallel training data. Our approach loosely relies on a cycle-GAN schema to minimize the reconstruction error from converting back and forth between emotion pairs. However, unlike the conventional cycle-GAN, our discriminator classifies whether a pair of input real and generated samples corresponds to the desired emotion conversion (e.g., A→B) or to its inverse (B→A). We will show that this setup, which we refer to as a variational cycle-GAN (VCGAN), is equivalent to minimizing the empirical KL divergence between the source features and their cyclic counterpart. In addition, our generator combines a trainable deep network with a fixed generative block to implement a smooth and invertible transformation on the input features, in our case, the fundamental frequency (F0) contour. This hybrid architecture regularizes our adversarial training procedure. We use crowd sourcing to evaluate both the emotional saliency and the quality of synthesized speech. Finally, we show that our model generalizes to new speakers by modifying speech produced by Wavenet.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Noé Tits|AUTHOR Noé Tits]], [[Kevin El Haddad|AUTHOR Kevin El Haddad]], [[Thierry Dutoit|AUTHOR Thierry Dutoit]]
</p><p class="cpabstractcardaffiliationlist">Université de Mons, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3401–3405&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite the growing interest for expressive speech synthesis, synthesis of nonverbal expressions is an under-explored area. In this paper we propose an audio laughter synthesis system based on a sequence-to-sequence TTS synthesis system. We leverage transfer learning by training a deep learning model to learn to generate both speech and laughs from annotations. We evaluate our model with a listening test, comparing its performance to an HMM-based laughter synthesis one and assess that it reaches higher perceived naturalness. Our solution is a first step towards a TTS system that would be able to synthesize speech with a control on amusement level with laughter integration.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuexin Cao|AUTHOR Yuexin Cao]]^^1^^, [[Zhengchen Liu|AUTHOR Zhengchen Liu]]^^1^^, [[Minchuan Chen|AUTHOR Minchuan Chen]]^^1^^, [[Jun Ma|AUTHOR Jun Ma]]^^1^^, [[Shaojun Wang|AUTHOR Shaojun Wang]]^^2^^, [[Jing Xiao|AUTHOR Jing Xiao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ping An Technology, China; ^^2^^Ping An Technology, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3406–3410&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a nonparallel emotional speech conversion (ESC) method based on Variational AutoEncoder-Generative Adversarial Network (VAE-GAN). Emotional speech conversion aims at transforming speech from one source emotion to that of a target emotion without changing the speaker’s identity and linguistic content. In this work, an encoder is trained to elicit the content-related representations from acoustic features. Emotion-related representations are extracted in a supervised manner. Then the transformation between emotion-related representations from different domains is learned using an improved cycle-consistent Generative Adversarial Network (CycleGAN). Finally, emotion conversion is performed by eliciting and recombining the content-related representations of the source speech and the emotion-related representations of the target emotion. Subjective evaluation experiments are conducted and the results show that the proposed method outperforms the baseline in terms of voice quality and emotion conversion ability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexander Sorin|AUTHOR Alexander Sorin]], [[Slava Shechtman|AUTHOR Slava Shechtman]], [[Ron Hoory|AUTHOR Ron Hoory]]
</p><p class="cpabstractcardaffiliationlist">IBM, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3411–3415&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a novel semi-supervised technique that enables expressive style control and cross-speaker transfer in neural text to speech (TTS), when available training data contains a limited amount of labeled expressive speech from a single speaker. The technique is based on unsupervised learning of a style-related latent space, generated by a previously proposed reference audio encoding technique, and transforming it by means of Principal Component Analysis to another low-dimensional space. The latter space represents style information in a purified form, disentangled from text and speaker-related information. Encodings for expressive styles that are present in the training data are easily constructed in this space. Furthermore, this technique provides control over the speech rate, pitch level, and articulation type that can be used for TTS voice transformation. 

We present the results of subjective crowd evaluations confirming that the synthesized speech convincingly conveys the desired expressive styles and preserves a high level of quality.</p></div>
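A minimal sketch of the style-space construction described above, assuming the reference-encoder embeddings are already available as a NumPy array; `ref_embeddings`, `style_labels` and the number of principal components are illustrative assumptions, not the paper's settings.

```python
# Sketch: project reference-audio encodings with PCA into a low-dimensional
# "purified" style space and build one style code per labelled expressive style
# as the mean projection of that style's utterances.
import numpy as np
from sklearn.decomposition import PCA

def build_style_codes(ref_embeddings, style_labels, n_components=3):
    style_labels = np.asarray(style_labels)
    pca = PCA(n_components=n_components)
    z = pca.fit_transform(ref_embeddings)            # low-dimensional style space
    codes = {style: z[style_labels == style].mean(axis=0)
             for style in np.unique(style_labels)}   # one code per style
    return pca, codes
```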
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kun Zhou|AUTHOR Kun Zhou]]^^1^^, [[Berrak Sisman|AUTHOR Berrak Sisman]]^^2^^, [[Mingyang Zhang|AUTHOR Mingyang Zhang]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^SUTD, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3416–3420&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotional voice conversion aims to convert the emotion of speech from one state to another while preserving the linguistic content and speaker identity. The prior studies on emotional voice conversion are mostly carried out under the assumption that emotion is speaker-dependent. We consider that there is a common code between speakers for emotional expression in a spoken language, therefore, a speaker-independent mapping between emotional states is possible. In this paper, we propose a speaker-independent emotional voice conversion framework, that can convert anyone’s emotion without the need for parallel data. We propose a VAW-GAN based encoder-decoder structure to learn the spectrum and prosody mapping. We perform prosody conversion by using continuous wavelet transform (CWT) to model the temporal dependencies. We also investigate the use of F0 as an additional input to the decoder to improve emotion conversion performance. Experiments show that the proposed speaker-independent framework achieves competitive results for both seen and unseen speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kento Matsumoto|AUTHOR Kento Matsumoto]], [[Sunao Hara|AUTHOR Sunao Hara]], [[Masanobu Abe|AUTHOR Masanobu Abe]]
</p><p class="cpabstractcardaffiliationlist">Okayama University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3421–3425&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a method to enhance the controllability of a Speech-like Emotional Sound (SES). In our previous study, we proposed an algorithm to generate SES by employing WaveNet as a sound generator and confirmed that SES can successfully convey emotional information. The proposed algorithm generates SES using only emotional IDs, which results in having no linguistic information. We call the generated sounds “speech-like” because they sound as if they are uttered by human beings although they contain no linguistic information. We could synthesize natural sounding acoustic signals that are fairly different from vocoder sounds to make the best use of WaveNet. To flexibly control the strength of emotions, this paper proposes to use a state of voiced, unvoiced, and silence (VUS) as auxiliary features. Three types of emotional speech, namely, neutral, angry, and happy, were generated and subjectively evaluated. Experimental results reveal the following: (1) VUS can control the strength of SES by changing the durations of VUS states, (2) VUS with narrow F0 distribution can express stronger emotions than that with wide F0 distribution, and (3) the smaller the unvoiced percentage is, the stronger the emotional impression is.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guangyan Zhang|AUTHOR Guangyan Zhang]], [[Ying Qin|AUTHOR Ying Qin]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3426–3430&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an extension of the Tacotron 2 end-to-end speech synthesis architecture, which aims to learn syllable-level discrete prosodic representations from speech data. The learned representations can be used for transferring or controlling prosody in expressive speech generation. The proposed design starts with a syllable-level text encoder that encodes input text at syllable level instead of phoneme level. The continuous prosodic representation for each syllable is then extracted. A Vector-Quantised Variational Auto-Encoder (VQ-VAE) is used to discretize the learned continuous prosodic representations. The discrete representations are finally concatenated with text encoder output to achieve prosody transfer or control. Subjective evaluation is carried out on the syllable-level TTS system, and the effectiveness of prosody transfer. The results show that the proposed Syllable-level neural TTS system produce more natural speech than conventional phoneme-level TTS system. It is also shown that prosody transfer could be achieved and the latent prosody codes are explainable with relation to specific prosody variation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuya Kishida|AUTHOR Takuya Kishida]], [[Shin Tsukamoto|AUTHOR Shin Tsukamoto]], [[Toru Nakashika|AUTHOR Toru Nakashika]]
</p><p class="cpabstractcardaffiliationlist">University of Electro-Communications, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3431–3435&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a multiple-domain adaptive restricted Boltzmann machine (MDARBM) for simultaneous conversion of speaker identity and emotion. This study is motivated by the assumption that representing multiple domains (e.g., speaker identity, emotion, accent) of speech explicitly in a single model is beneficial to reduce the effects from other domains when the model learns one domain’s characteristics. The MDARBM decomposes the visible-hidden connections of an RBM into domain-specific factors and a domain-independent factor to make it adaptable to multiple domains of speech. By switching the domain-specific factors from the source speaker and emotion to the target ones, the model can perform a simultaneous conversion. Experimental results showed that the target domain conversion task was enhanced by the other in the simultaneous conversion framework. In a two-domain conversion task, the MDARBM outperformed a combination of ARBMs independently trained with speaker-identity and emotion units.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Soo-Whan Chung|AUTHOR Soo-Whan Chung]]^^1^^, [[Soyeon Choe|AUTHOR Soyeon Choe]]^^2^^, [[Joon Son Chung|AUTHOR Joon Son Chung]]^^2^^, [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yonsei University, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3481–3485&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The objective of this paper is to separate a target speaker’s speech from a mixture of two speakers using a deep audio-visual speech separation network. Unlike previous works that used lip movement on video clips or pre-enrolled speaker information as an auxiliary conditional feature, we use a single face image of the target speaker. In this task, the conditional feature is obtained from facial appearance in cross-modal biometric task, where audio and visual identity representations are shared in latent space. Learnt identities from facial images enforce the network to isolate matched speakers and extract the voices from mixed speech. It solves the permutation problem caused by swapped channel outputs, frequently occurred in speech separation tasks. The proposed method is far more practical than video-based speech separation since user profile images are readily available on many platforms. Also, unlike speaker-aware separation methods, it is applicable on separation with unseen speakers who have never been enrolled before. We show strong qualitative and quantitative results on challenging real-world examples.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vighnesh Reddy Konda|AUTHOR Vighnesh Reddy Konda]]^^1^^, [[Mayur Warialani|AUTHOR Mayur Warialani]]^^1^^, [[Rakesh Prasanth Achari|AUTHOR Rakesh Prasanth Achari]]^^1^^, [[Varad Bhatnagar|AUTHOR Varad Bhatnagar]]^^1^^, [[Jayaprakash Akula|AUTHOR Jayaprakash Akula]]^^1^^, [[Preethi Jyothi|AUTHOR Preethi Jyothi]]^^1^^, [[Ganesh Ramakrishnan|AUTHOR Ganesh Ramakrishnan]]^^1^^, [[Gholamreza Haffari|AUTHOR Gholamreza Haffari]]^^2^^, [[Pankaj Singh|AUTHOR Pankaj Singh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIT Bombay, India; ^^2^^Monash University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3525–3529&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Understanding videos via captioning has gained a lot of traction recently. While captions are provided alongside videos, the information about where a caption aligns within a video is missing, which could be particularly useful for indexing and retrieval. Existing work on learning to infer alignments has mostly exploited visual features and ignored the audio signal. Video understanding applications often underestimate the importance of the audio modality. We focus on how to make effective use of the audio modality for temporal localization of captions within videos. We release a new audio-visual dataset that has captions time-aligned by (i) carefully listening to the audio and watching the video, and (ii) watching only the video. Our dataset is audio-rich and contains captions in two languages, English and Marathi (a low-resource language). We further propose an attention-driven multimodal model, for effective utilization of both audio and video for temporal localization. We then investigate (i) the effects of audio in both data preparation and model design, and (ii) effective pretraining strategies (Audioset, ASR-bottleneck features, PASE, etc.) handling low-resource setting to help extract rich audio representations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Soo-Whan Chung|AUTHOR Soo-Whan Chung]]^^1^^, [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]^^1^^, [[Joon Son Chung|AUTHOR Joon Son Chung]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yonsei University, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3486–3490&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The goal of this work is to train discriminative cross-modal embeddings without access to manually annotated data. Recent advances in self-supervised learning have shown that effective representations can be learnt from natural cross-modal synchrony. We build on earlier work to train embeddings that are more discriminative for uni-modal downstream tasks. To this end, we propose a novel training strategy that not only optimises metrics across modalities, but also enforces intra-class feature separation within each of the modalities. The effectiveness of the method is demonstrated on two downstream tasks: lip reading using the features trained on audio-visual synchronisation, and speaker recognition using the features trained for cross-modal biometric matching. The proposed method outperforms state-of-the-art self-supervised baselines by a significant margin.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michael Wand|AUTHOR Michael Wand]], [[Jürgen Schmidhuber|AUTHOR Jürgen Schmidhuber]]
</p><p class="cpabstractcardaffiliationlist">IDSIA, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3491–3495&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study we investigate architectures for modality fusion in audiovisual speech recognition, where one aims to alleviate the adverse effect of acoustic noise on the speech recognition accuracy by using video images of the speaker’s face as an additional modality. Starting from an established neural network fusion system, we substantially improve the recognition accuracy by taking single-modality losses into account: late fusion (at the output logits level) is substantially more robust than the baseline, in particular for unseen acoustic noise, at the expense of having to determine the optimal weighting of the input streams. The latter requirement can be removed by making the fusion itself a trainable part of the network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jianwei Yu|AUTHOR Jianwei Yu]]^^1^^, [[Bo Wu|AUTHOR Bo Wu]]^^2^^, [[Rongzhi Gu|AUTHOR Rongzhi Gu]]^^2^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^3^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^2^^, [[Yong Xu|AUTHOR Yong Xu]]^^3^^, [[Meng Yu|AUTHOR Meng Yu]]^^3^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^3^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^Tencent, China; ^^3^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3496–3500&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) of overlapped speech remains a highly challenging task to date. To this end, multi-channel microphone array data are widely used in state-of-the-art ASR systems. Motivated by the invariance of visual modality to acoustic signal corruption, this paper presents an audio-visual multi-channel overlapped speech recognition system featuring tightly integrated separation front-end and recognition backend. A series of audio-visual multi-channel speech separation front-end components based on //TF masking//, //filter & sum// and //mask-based MVDR// beamforming approaches were developed. To reduce the error cost mismatch between the separation and recognition components, they were jointly fine-tuned using the connectionist temporal classification (CTC) loss function, or a multi-task criterion interpolation with scale-invariant signal to noise ratio (Si-SNR) error cost. Experiments suggest that the proposed multi-channel AVSR system outperforms the baseline audio-only ASR system by up to 6.81% (26.83% relative) and 22.22% (56.87% relative) absolute word error rate (WER) reduction on overlapped speech constructed using either simulation or replaying of the lipreading sentence 2 (LRS2) dataset respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wubo Li|AUTHOR Wubo Li]], [[Dongwei Jiang|AUTHOR Dongwei Jiang]], [[Wei Zou|AUTHOR Wei Zou]], [[Xiangang Li|AUTHOR Xiangang Li]]
</p><p class="cpabstractcardaffiliationlist">DiDi Chuxing, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3501–3505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio Visual Scene-aware Dialog (AVSD) is a task to generate responses when discussing about a given video. The previous state-of-the-art model shows superior performance for this task using Transformer-based architecture. However, there remain some limitations in learning better representation of modalities. Inspired by Neural Machine Translation (NMT), we propose the Transformer-based Modal Translator (TMT) to learn the representations of the source modal sequence by translating the source modal sequence to the related target modal sequence in a supervised manner. Based on Multimodal Transformer Networks (MTN), we apply TMT to video and dialog, proposing MTN-TMT for the video-grounded dialog system. On the AVSD track of the Dialog System Technology Challenge 7, MTN-TMT outperforms the MTN and other submission models in both Video and Text task and Text Only task. Compared with MTN, MTN-TMT improves all metrics, especially, achieving relative improvement up to 14.1% on CIDEr.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[George Sterpu|AUTHOR George Sterpu]], [[Christian Saam|AUTHOR Christian Saam]], [[Naomi Harte|AUTHOR Naomi Harte]]
</p><p class="cpabstractcardaffiliationlist">Trinity College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3506–3509&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The audio-visual speech fusion strategy AV Align has shown significant performance improvements in audio-visual speech recognition (AVSR) on the challenging LRS2 dataset. Performance improvements range between 7% and 30% depending on the noise level when leveraging the visual modality of speech in addition to the auditory one. This work presents a variant of AV Align where the recurrent Long Short-term Memory (LSTM) computation block is replaced by the more recently proposed Transformer block. We compare the two methods, discussing in greater detail their strengths and weaknesses. We find that Transformers also learn cross-modal monotonic alignments, but suffer from the same visual convergence problems as the LSTM model, calling for a deeper investigation into the dominant modality problem in machine learning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandros Koumparoulis|AUTHOR Alexandros Koumparoulis]]^^1^^, [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]^^1^^, [[Samuel Thomas|AUTHOR Samuel Thomas]]^^2^^, [[Edmilson da Silva Morais|AUTHOR Edmilson da Silva Morais]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Thessaly, Greece; ^^2^^IBM, USA; ^^3^^IBM, Brazil</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3510–3514&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We focus on the problem of efficient architectures for lipreading that allow trading-off computational resources for visual speech recognition accuracy. In particular, we make two contributions: First, we introduce MobiLipNetV3, an efficient and accurate lipreading model, based on our earlier work on MobiLipNetV2 and incorporating recent advances in convolutional neural network architectures. Second, we propose a novel recognition paradigm, called MultiRate Ensemble (MRE), that combines a “lean” and a “full” MobiLipNetV3 in the lipreading pipeline, with the latter applied at a lower frame rate. This architecture yields a family of systems offering multiple accuracy vs. efficiency operating points depending on the frame-rate decimation of the “full” model, thus allowing adaptation to the available device resources. We evaluate our approach on the TCD-TIMIT corpus, popular in speaker-independent lipreading of continuous speech. The proposed MRE family of systems can be up to 73 times more efficient compared to residual neural network based lipreading, and up to twice as MobiLipNetV2, while in both cases reaching up to 8% absolute WER reduction, depending on the MRE chosen operating point. For example, a temporal decimation of three yields a 7% absolute WER reduction and a 26% relative decrease in computations over MobiLipNetV2.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masood S. Mortazavi|AUTHOR Masood S. Mortazavi]]
</p><p class="cpabstractcardaffiliationlist">Futurewei Technologies, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3515–3519&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Semantically-aligned (//speech; image//) datasets can be used to explore “visually-grounded speech”. In a majority of existing investigations, features of an image signal are extracted using neural networks “pre-trained” on other tasks (e.g., classification on ImageNet). In still others, pre-trained networks are used to extract audio features prior to semantic embedding. Without “transfer learning” through pre-trained initialization or pre-trained feature extraction, previous results have tended to show low rates of recall in //speech// → //image// and //image// → //speech// queries.

Choosing appropriate neural architectures for encoders in the speech and image branches and using large datasets, one can obtain competitive recall rates without reliance on any pre-trained initialization or feature extraction: (//speech; image//) semantic alignment and //speech// → //image// and //image// → //speech// retrieval are canonical tasks worthy of investigation in their own right and allow one to explore other questions — e.g., the size of the audio embedder can be reduced significantly with little loss of recall rates in //speech// → //image// and //image// → //speech// queries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hong Liu|AUTHOR Hong Liu]], [[Zhan Chen|AUTHOR Zhan Chen]], [[Bing Yang|AUTHOR Bing Yang]]
</p><p class="cpabstractcardaffiliationlist">Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3520–3524&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current studies have shown that extracting representative visual features and efficiently fusing audio and visual modalities are vital for audio-visual speech recognition (AVSR), but these are still challenging. To this end, we propose a lip graph assisted AVSR method with bidirectional synchronous fusion. First, a hybrid visual stream combines the image branch and graph branch to capture discriminative visual features. Specially, the lip graph exploits the natural and dynamic connections between the lip key points to model the lip shape, and the temporal evolution of the lip graph is captured by the graph convolutional networks followed by bidirectional gated recurrent units. Second, the hybrid visual stream is combined with the audio stream by an attention-based bidirectional synchronous fusion which allows bidirectional information interaction to resolve the asynchrony between the two modalities during fusion. The experimental results on LRW-BBC dataset show that our method outperforms the end-to-end AVSR baseline method in both clean and noisy conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Victoria Mingote|AUTHOR Victoria Mingote]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3067–3071&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art music detection systems, whose aim is to distinguish whether or not music is present in an audio signal, rely mainly on deep learning approaches. However, these kind of solutions are strongly dependent on the amount of data they were trained on. In this paper, we introduce the area under the ROC curve (AUC) and partial AUC (pAUC) optimisation techniques, recently developed for neural networks, into the music detection task, seeking to overcome the issues derived from data limitation. Using recurrent neural networks as the main element in the system and with a limited training set of around 20 hours of audio, we explore different approximations to threshold-independent training objectives. Furthermore, we propose a novel training objective based on the decomposition of the area under the ROC curve as the sum of two partial areas under the ROC curve. Experimental results show that partial AUC optimisation can improve the performance of music detection systems significantly compared to traditional training criteria such as cross entropy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Niko Moritz|AUTHOR Niko Moritz]], [[Gordon Wichern|AUTHOR Gordon Wichern]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]
</p><p class="cpabstractcardaffiliationlist">MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3112–3116&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR), audio tagging (AT), and acoustic event detection (AED) are typically treated as separate problems, where each task is tackled using specialized system architectures. This is in contrast with the way the human auditory system uses a single (binaural) pathway to process sound signals from different sources. In addition, an acoustic model trained to recognize speech as well as sound events could leverage multi-task learning to alleviate data scarcity problems in individual tasks. In this work, an all-in-one (AIO) acoustic model based on the Transformer architecture is trained to solve ASR, AT, and AED tasks simultaneously, where model parameters are shared across all tasks. For the ASR and AED tasks, the Transformer model is combined with the connectionist temporal classification (CTC) objective to enforce a monotonic ordering and to utilize timing information. Our experiments demonstrate that the AIO Transformer achieves better performance compared to all baseline systems of various recent DCASE challenge tasks and is suitable for the //total transcription// of an acoustic scene, i.e., to simultaneously transcribe speech and recognize the acoustic events occurring in it.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marvin Lavechin|AUTHOR Marvin Lavechin]]^^1^^, [[Ruben Bousbib|AUTHOR Ruben Bousbib]]^^1^^, [[Hervé Bredin|AUTHOR Hervé Bredin]]^^2^^, [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]^^1^^, [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LSCP (UMR 8554), France; ^^2^^LIMSI (UPR 3251), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3072–3076&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spontaneous conversations in real-world settings such as those found in child-centered recordings have been shown to be amongst the most challenging audio files to process. Nevertheless, building speech processing models handling such a wide variety of conditions would be particularly useful for language acquisition studies in which researchers are interested in the quantity and quality of the speech that children hear and produce, as well as for early diagnosis and measuring effects of remediation. In this paper, we present our approach to designing an open-source neural network to classify audio segments into vocalizations produced by the child wearing the recording device, vocalizations produced by other children, adult male speech, and adult female speech. To this end, we gathered diverse child-centered corpora which sums up to a total of 260 hours of recordings and covers 10 languages. Our model can be used as input for downstream tasks such as estimating the number of words produced by adult speakers, or the number of linguistic units produced by children. Our architecture combines SincNet filters with a stack of recurrent layers and outperforms by a large margin the state-of-the-art system, the Language ENvironment Analysis (LENA) that has been used in numerous child language studies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chao Peng|AUTHOR Chao Peng]], [[Xihong Wu|AUTHOR Xihong Wu]], [[Tianshu Qu|AUTHOR Tianshu Qu]]
</p><p class="cpabstractcardaffiliationlist">Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3077–3081&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a method for estimating the competing speaker count with deep spectral and spatial embedding fusion. The basic idea is that mixed speech can be projected into an embedding space using neural networks where embedding vectors are orthogonal for different speakers while parallel for the same speaker. Therefore, speaker count estimation can be performed by computing the rank of the mean covariance matrix of the embedding vectors. It is also a feature combination method in speaker embedding space instead of simply combining features at the input layer of neural networks. Experimental results show that embedding-based method is better than classification-based method where the network directly predicts the count of speakers and spatial features help to speaker count estimation. In addition, the features combined in the embedding space can achieve more accurate speaker counting than features combined at the input layer of neural networks when tested on anechoic and reverberant datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shoufeng Lin|AUTHOR Shoufeng Lin]], [[Xinyuan Qian|AUTHOR Xinyuan Qian]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3082–3086&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-speaker tracking using both audio and video modalities is a key task in human-robot interaction and video conferencing. The complementary nature of audio and video signals improves the tracking robustness against noise and outliers compared to the uni-modal approaches. However, the online tracking of multiple speakers via audio-video fusion, especially without the target number prior, is still an open challenge. In this paper, we propose a Generalized Labelled Multi-Bernoulli (GLMB)-based framework that jointly estimates the number of targets and their respective states online. Experimental results using the AV16.3 dataset demonstrate the effectiveness of the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuo Liu|AUTHOR Shuo Liu]], [[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3087–3091&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work discusses the impact of human voice on acoustic scene classification (ASC) systems. Typically, such systems are trained and evaluated on data sets lacking human speech. We show experimentally that the addition of speech can be detrimental to system performance. Furthermore, we propose two alternative solutions to mitigate that effect in the context of deep neural networks (DNNs). We first utilise data augmentation to make the algorithm robust against the presence of human speech in the data. We also introduce a voice-suppression algorithm that removes human speech from audio recordings, and test the DNN classifier on those denoised samples. Experimental results show that both approaches reduce the negative effects of human voice in ASC systems. Compared to using data augmentation, applying voice suppression achieved better classification accuracy and managed to perform more stably for different speech intensity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Junzhe Zhu|AUTHOR Junzhe Zhu]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]], [[Leda Sarı|AUTHOR Leda Sarı]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3092–3096&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In scenarios where multiple speakers talk at the same time, it is important to be able to identify the talkers accurately. This paper presents an end-to-end system that integrates speech source extraction and speaker identification, and proposes a new way to jointly optimize these two parts by max-pooling the speaker predictions along the channel dimension. Residual attention permits us to learn spectrogram masks that are optimized for the purpose of speaker identification, while residual forward connections permit dilated convolution with a sufficiently large context window to guarantee correct streaming across syllable boundaries. End-to-end training results in a system that recognizes one speaker in a two-speaker broadcast speech mixture with 99.9% accuracy and both speakers with 93.9% accuracy, and that recognizes all speakers in three-speaker scenarios with 81.2% accuracy.¹</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thilo von Neumann|AUTHOR Thilo von Neumann]]^^1^^, [[Christoph Boeddeker|AUTHOR Christoph Boeddeker]]^^1^^, [[Lukas Drude|AUTHOR Lukas Drude]]^^1^^, [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]^^2^^, [[Marc Delcroix|AUTHOR Marc Delcroix]]^^2^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^2^^, [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Paderborn, Germany; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3097–3101&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most approaches to multi-talker overlapped speech separation and recognition assume that the number of simultaneously active speakers is given, but in realistic situations, it is typically unknown. To cope with this, we extend an iterative speech extraction system with mechanisms to count the number of sources and combine it with a single-talker speech recognizer to form the first end-to-end multi-talker automatic speech recognition system for an unknown number of active speakers. Our experiments show very promising performance in counting accuracy, source separation and speech recognition on simulated clean mixtures from WSJ0-2mix and WSJ0-3mix. Among others, we set a new state-of-the-art word error rate on the WSJ0-2mix database. Furthermore, our system generalizes well to a larger number of speakers than it ever saw during training, as shown in experiments with the WSJ0-4mix database.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shreya G. Upadhyay|AUTHOR Shreya G. Upadhyay]], [[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3102–3106&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A well-trained Acoustic Sound Event Detection system captures the patterns of the sound to accurately detect events of interest in an auditory scene, which enables applications across domains of multimedia, smart living, and even health monitoring. Due to the scarcity and the weak labelling nature of the sound event data, it is often challenging to train an accurate and robust acoustic event detection model directly, especially for those rare occurrences. In this paper, we proposed an architecture which takes the advantage of integrating ASR network representations as additional input when training a sound event detector. Here we used the convolutional bi-directional recurrent neural network (CBRNN), which includes both spectral and temporal attentions, as the SED classifier and further combined the ASR feature representations when performing the end-to-end CBRNN training. Our experiments on the TUT 2017 rare sound event detection dataset showed that with the inclusion of ASR features, the overall discriminative performance of the end-to-end sound event detection system has improved; the average performance of our proposed framework in terms of f-score and error rates are 97% and 0.05% respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samuele Cornell|AUTHOR Samuele Cornell]]^^1^^, [[Maurizio Omologo|AUTHOR Maurizio Omologo]]^^2^^, [[Stefano Squartini|AUTHOR Stefano Squartini]]^^1^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università Politecnica delle Marche, Italy; ^^2^^FBK, Italy; ^^3^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3107–3111&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the problem of detecting the activity and counting overlapping speakers in distant-microphone recordings. We treat supervised Voice Activity Detection (VAD), Overlapped Speech Detection (OSD), joint VAD+OSD, and speaker counting as instances of a general Overlapped Speech Detection and Counting (OSDC) task, and we design a Temporal Convolutional Network (TCN) based method to address it. We show that TCNs significantly outperform state-of-the-art methods on two real-world distant speech datasets. In particular our best architecture obtains, for OSD, 29.1% and 25.5% absolute improvement in Average Precision over previous techniques on, respectively, the AMI and CHiME-6 datasets. Furthermore, we find that generalization for joint VAD+OSD improves by using a speaker counting objective rather than a VAD+OSD objective. We also study the effectiveness of forced alignment based labeling and data augmentation, and show that both can improve OSD performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lorenz Diener|AUTHOR Lorenz Diener]]^^1^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^2^^, [[Catarina Botelho|AUTHOR Catarina Botelho]]^^3^^, [[Kevin Scheck|AUTHOR Kevin Scheck]]^^1^^, [[Dennis Küster|AUTHOR Dennis Küster]]^^1^^, [[Isabel Trancoso|AUTHOR Isabel Trancoso]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^2^^, [[Tanja Schultz|AUTHOR Tanja Schultz]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Bremen, Germany; ^^2^^Universität Augsburg, Germany; ^^3^^INESC-ID Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3117–3121&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Silent Computational Paralinguistics (SCP) — the assessment of speaker states and traits from non-audibly spoken communication — has rarely been targeted in the rich body of either Computational Paralinguistics or Silent Speech Processing. Here, we provide first steps towards this challenging but potentially highly rewarding endeavour: Paralinguistics can enrich spoken language interfaces, while Silent Speech Processing enables confidential and unobtrusive spoken communication for everybody, including mute speakers. We approach SCP by using speech-related biosignals stemming from facial muscle activities captured by surface electromyography (EMG). To demonstrate the feasibility of SCP, we select one speaker trait (speaker identity) and one speaker state (speaking mode). We introduce two promising strategies for SCP: (1) deriving paralinguistic speaker information directly from EMG of silently produced speech versus (2) first converting EMG into an audible speech signal followed by conventional computational paralinguistic methods. We compare traditional feature extraction and decision making approaches to more recent deep representation and transfer learning by convolutional and recurrent neural networks, using openly available EMG data. We find that paralinguistics can be assessed not only from acoustic speech but also from silent speech captured by EMG.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shun-Chang Zhong|AUTHOR Shun-Chang Zhong]]^^1^^, [[Bo-Hao Su|AUTHOR Bo-Hao Su]]^^1^^, [[Wei Huang|AUTHOR Wei Huang]]^^2^^, [[Yi-Ching Liu|AUTHOR Yi-Ching Liu]]^^3^^, [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Tsing Hua University; ^^2^^Gamania Digital Entertainment; ^^3^^National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3122–3126&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent works have demonstrated that the integration of group-level personality and vocal behaviors can provide enhanced prediction power on task performance for small group interactions. In this work, we propose that the impact of member personality for task performance prediction in groups should be explicitly modeled from both //intra// and //inter//-group perspectives. Specifically, we propose a Graph Interlocutor Acoustic Network (G-IAN) architecture that jointly learns the relationship between vocal behaviors and personality attributes with intra-group attention and inter-group graph convolutional layer. We evaluate our proposed G-IAN on two group interaction databases and achieve 78.4% and 72.2% group performance classification accuracy, which outperforms the baseline model that models vocal behavior only by 14% absolute. Further, our analysis shows that Agreeableness and Conscientiousness demonstrate a clear positive impact in our model that leverages the inter-group personality structure for enhanced task performance prediction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]
</p><p class="cpabstractcardaffiliationlist">MTA-SZTE RGAI, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3127–3131&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The automatic detection of conflict situations from human speech has several applications like obtaining feedback of employees in call centers, the surveillance of public spaces, and other roles in human-computer interactions. Although several methods have been developed to automatic conflict detection, they were designed to operate on relatively long utterances. In practice, however, it would be beneficial to process much shorter speech segments. With the traditional workflow of paralinguistic speech processing, this would require properly annotated training and testing material consisting of short clips. In this study we show that Support Vector Regression machine learning models using Fisher vectors as features, even when trained on longer utterances, allow us to efficiently and accurately detect conflict intensity from very short audio segments. Even without having reliable annotations of these such short chunks, the mean scores of the predictions corresponding to short segments of the same original, longer utterances correlate well to the reference manual annotation. We also verify the validity of this approach by comparing the SVM predictions of the chunks with a manual annotation for the full and the 5-second-long cases. Our findings allow the construction of conflict detection systems having smaller delay, therefore being more useful in practice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroki Mori|AUTHOR Hiroki Mori]], [[Yuki Kikuchi|AUTHOR Yuki Kikuchi]]
</p><p class="cpabstractcardaffiliationlist">Utsunomiya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3132–3135&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Screams in everyday conversation, rather than in emergencies, are considered as nonverbal behavior that makes our speech communication rich and expressive. This paper focuses on screams in conversation. Identification of screams in existing spoken dialog corpora revealed that these corpora contained only a small number of screams, so not adequate for the investigation of the screams. In order to obtain more screams that naturally occur in conversation, we recorded dialogs while playing highly action-oriented games. Following to our criteria to identify screams, 1437 screams were detected from the whole recordings. The screams in our corpus were 12 times more frequent than the existing gaming corpus. As for the number of screams per minute, a strong positive correlation was observed between two speakers of the same pair, suggesting that the interlocutors produced screams not purely spontaneously, but tried to get the screaming behavior closer to the other person. Results of the acoustic analysis showed that the typical scream is produced in 60–140 mel higher and 8 dB louder voice than typical normal speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amber Afshan|AUTHOR Amber Afshan]], [[Jody Kreiman|AUTHOR Jody Kreiman]], [[Abeer Alwan|AUTHOR Abeer Alwan]]
</p><p class="cpabstractcardaffiliationlist">University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3136–3140&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Does speaking style variation affect humans’ ability to distinguish individuals from their voices? How do humans compare with automatic systems designed to discriminate between voices? In this paper, we attempt to answer these questions by comparing human and machine speaker discrimination performance for read speech versus casual conversations. Thirty listeners were asked to perform a same versus different speaker task. Their performance was compared to a state-of-the-art x-vector/PLDA-based automatic speaker verification system. Results showed that both humans and machines performed better with style-matched stimuli, and human performance was better when listeners were native speakers of American English. Native listeners performed better than machines in the style-matched conditions (EERs of 6.96% versus 14.35% for read speech, and 15.12% versus 19.87%, for conversations), but for style-mismatched conditions, there was no significant difference between native listeners and machines. In all conditions, fusing human responses with machine results showed improvements compared to each alone, suggesting that humans and machines have different approaches to speaker discrimination tasks. Differences in the approaches were further confirmed by examining results for individual speakers which showed that the perception of distinct and confused speakers differed between human listeners and machines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kamini Sabu|AUTHOR Kamini Sabu]], [[Preeti Rao|AUTHOR Preeti Rao]]
</p><p class="cpabstractcardaffiliationlist">IIT Bombay, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3141–3145&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Perceived speaker confidence or certainty has been found to correlate with lexical and acoustic-prosodic features in the spontaneous speech of children interacting with an adult. We investigate the prediction of confidence in the context of oral reading of stories by children with good word recognition skills where we must rely purely on prosodic features. We report a dataset of oral reading recordings that has been manually rated for confidence at the level of text paragraphs of 50–70 words. Several acoustic features computed at different time scales are evaluated via a trained classifier for the prediction of the subjective ratings. Features based on pausing, pitch and speech rate are found to be important predictors of perceived confidence. Also it is seen that the ratings are influenced by signal properties computed across the utterance. When trained on recordings with strong rater agreement, the system predicts low confidence readers with an F-score of 0.70.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[W. Xue|AUTHOR W. Xue]]^^1^^, [[V. Mendoza Ramos|AUTHOR V. Mendoza Ramos]]^^2^^, [[W. Harmsen|AUTHOR W. Harmsen]]^^1^^, [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]]^^1^^, [[R.W.N.M. van Hout|AUTHOR R.W.N.M. van Hout]]^^1^^, [[Helmer Strik|AUTHOR Helmer Strik]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Radboud Universiteit, The Netherlands; ^^2^^UZA, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3146–3150&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech intelligibility is an essential though complex construct in speech pathology. It is affected by multiple contextual variables and it is often measured in different ways. In this paper, we evaluate various measures of speech intelligibility based on orthographic transcriptions, with respect to their reliability and validity. For this study, different speech tasks were analyzed together with their respective perceptual ratings assigned by five experienced speech-language pathologists: a Visual Analogue Scale (VAS) and two types of orthographic transcriptions, one in terms of existing words and the other in terms of perceived segments, including nonsense words. Six subword measures concerning graphemes and phonemes were derived automatically from these transcriptions. All measures exhibit high degrees of reliability. Correlations between the six subword measures and three independent measures, VAS, word accuracy, and severity level, reveal that the measures extracted automatically from the orthographic transcriptions are valid predictors of speech intelligibility. The results also indicate differences between the speech tasks, suggesting that a comprehensive assessment of speech intelligibility requires materials from different speech tasks in combination with measures at different granularity levels: utterance, word, and subword. We discuss these results in relation to those of previous research and suggest possible avenues for future research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Lin|AUTHOR Yi Lin]], [[Hongwei Ding|AUTHOR Hongwei Ding]]
</p><p class="cpabstractcardaffiliationlist">SJTU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3151–3155&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Communication channels and actor’s gender have been increasingly reported to influence emotion perception, but past literature exploring these two factors has largely been disassociated. The present study examined how emotions expressed by actors of the two genders are perceived in three different sensory channels (i.e. face, prosody, and semantics). Eighty-eight native Mandarin participants (43 females and 45 males) were asked to identify the emotion displayed visually through face, or auditorily through prosody or semantics in a fixed-choice format, in which accuracy and reaction time were recorded. Results revealed that visual facial expressions were more accurately and rapidly identified, particularly when posed by female actors. Additionally, emotion perception in the auditory modality was modulated by actor’s gender to a greater extent: emotional prosody yielded more accurate and faster responses when expressed by female than male actors, while emotional semantics produced better performances when presented by males. To sum up, paralinguistic (i.e., visual and prosodic) dominance effects are more evident in emotions expressed by female than male actors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivo Anjos|AUTHOR Ivo Anjos]]^^1^^, [[Maxine Eskenazi|AUTHOR Maxine Eskenazi]]^^2^^, [[Nuno Marques|AUTHOR Nuno Marques]]^^1^^, [[Margarida Grilo|AUTHOR Margarida Grilo]]^^3^^, [[Isabel Guimarães|AUTHOR Isabel Guimarães]]^^4^^, [[João Magalhães|AUTHOR João Magalhães]]^^1^^, [[Sofia Cavaco|AUTHOR Sofia Cavaco]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NOVA, Portugal; ^^2^^Carnegie Mellon University, USA; ^^3^^ESSA, Portugal; ^^4^^ESSA, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3156–3160&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Children with fricative distortion errors have to learn how to correctly use the vocal folds, and which place of articulation to use in order to correctly produce the different fricatives. Here we propose a virtual tutor for fricatives distortion correction. This is a virtual tutor for speech and language therapy that helps children understand their fricative production errors and how to correctly use their speech organs. The virtual tutor uses log Mel filter banks and deep learning techniques with spectral-temporal convolutions of the data to classify the fricatives in children’s speech by place of articulation and voicing. It achieves an accuracy of 90.40% for place of articulation and 90.93% for voicing with children’s speech. Furthermore, this paper discusses a multidimensional advanced data analysis of the first layer convolutional kernel filters that validates the usefulness of performing the convolution on the log Mel filter bank.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haitong Zhang|AUTHOR Haitong Zhang]], [[Yue Lin|AUTHOR Yue Lin]]
</p><p class="cpabstractcardaffiliationlist">NetEase, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3161–3165&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, sequence-to-sequence models with attention have been successfully applied in Text-to-speech (TTS). These models can generate near-human speech with a large accurately-transcribed speech corpus. However, preparing such a large data-set is both expensive and laborious. To alleviate the problem of heavy data demand, we propose a novel unsupervised pre-training mechanism in this paper. Specifically, we first use Vector-quantization Variational-Autoencoder (VQ-VAE) to extract the unsupervised linguistic units from large-scale, publicly found, and untranscribed speech. We then pre-train the sequence-to-sequence TTS model by using the <unsupervised linguistic units, audio> pairs. Finally, we fine-tune the model with a small amount of <text, audio> paired data from the target speaker. As a result, both objective and subjective evaluations show that our proposed method can synthesize more intelligible and natural speech with the same amount of paired training data. Besides, we extend our proposed method to the hypothesized low-resource languages and verify the effectiveness of the method using objective evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jacob J. Webber|AUTHOR Jacob J. Webber]]^^1^^, [[Olivier Perrotin|AUTHOR Olivier Perrotin]]^^2^^, [[Simon King|AUTHOR Simon King]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^GIPSA-lab (UMR 5216), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3206–3210&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce a prototype system for modifying an arbitrary parameter of a speech signal. Unlike signal processing approaches that require dedicated methods for different parameters, our system can — in principle — modify any control parameter that the signal can be annotated with. Our system comprises three neural networks. The ‘hider’ removes all information related to the control parameter, outputting a hidden embedding. The ‘finder’ is an adversary used to train the ‘hider’, attempting to detect the value of the control parameter from the hidden embedding. The ‘combiner’ network recombines the hidden embedding with a desired new value of the control parameter. The input and output to the system are mel-spectrograms and we employ a neural vocoder to generate the output speech waveform. As a proof of concept, we use F,,0,, as the control parameter. The system was evaluated in terms of control parameter accuracy and naturalness against a high quality signal processing method of F,,0,, modification that also works in the spectrogram domain. We also show that, with modifications only to training data, the system is capable of modifying the 1^^st^^ and 2^^nd^^ vocal tract formants, showing progress towards universal signal modification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kasperi Palkama|AUTHOR Kasperi Palkama]], [[Lauri Juvela|AUTHOR Lauri Juvela]], [[Alexander Ilin|AUTHOR Alexander Ilin]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3166–3170&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper adapts a StyleGAN model for speech generation with minimal or no conditioning on text. StyleGAN is a multi-scale convolutional GAN capable of hierarchically capturing data structure and latent variation on multiple spatial (or temporal) levels. The model has previously achieved impressive results on facial image generation, and it is appealing to audio applications due to similar multi-level structures present in the data. In this paper, we train a StyleGAN to generate mel-spectrograms on the Speech Commands dataset, which contains spoken digits uttered by multiple speakers in varying acoustic conditions. In a conditional setting our model is conditioned on the digit identity, while learning the remaining data variation remains an unsupervised task. We compare our model to the current unsupervised state-of-the-art speech synthesis GAN architecture, the WaveGAN, and show that the proposed model outperforms according to numerical measures and subjective evaluation by listening tests.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingzhou Yang|AUTHOR Jingzhou Yang]], [[Lei He|AUTHOR Lei He]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3171–3175&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper studies a multilingual sequence-to-sequence text-to-speech framework towards universal modeling, that is able to synthesize speech for any speaker in any language using a single model. This framework consists of a transformer-based acoustic predictor and a WaveNet neural vocoder, with global conditions from speaker and language networks. It is examined on a massive TTS data set with around 1250 hours of data from 50 language locales, and the amount of data in different locales is highly unbalanced. Although the multilingual model exhibits the transfer learning ability to benefit the low-resource languages, data imbalance still undermines the model performance. A data balance training strategy is successfully applied and effectively improves the voice quality of the low-resource languages. Furthermore, this paper examines the modeling capacity of extending to new speakers and languages, as a key step towards universal modeling. Experiments show 20 seconds of data is feasible for a new speaker and 6 minutes for a new language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kouichi Katsurada|AUTHOR Kouichi Katsurada]]^^1^^, [[Korin Richmond|AUTHOR Korin Richmond]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tokyo University of Science, Japan; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3176–3180&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a speaker-independent mel-cepstrum estimation system which accepts electromagnetic articulography (EMA) data as input. The system collects speaker information with d-vectors generated from the EMA data. We have also investigated the effect of speaker independence in the input vectors given to the mel-cepstrum estimator. This is accomplished by introducing a two-stage network, where the first stage is trained to output EMA sequences that are averaged across all speakers on a per-triphone basis (and so are speaker-independent) and the second receives these as input for mel-cepstrum estimation. Experimental results show that using the d-vectors can improve the performance of mel-cepstrum estimation by 0.19 dB with regard to mel-cepstrum distortion in the closed-speaker test set. Additionally, giving triphone-averaged EMA data to a mel-cepstrum estimator is shown to improve the performance by a further 0.16 dB, which indicates that the speaker-independent input has a positive effect on mel-cepstrum estimation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiangyu Liang|AUTHOR Xiangyu Liang]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Runnan Li|AUTHOR Runnan Li]]^^2^^, [[Yanqing Liu|AUTHOR Yanqing Liu]]^^2^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3181–3185&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the development of sequence-to-sequence modeling algorithms, Text-to-Speech (TTS) techniques have achieved significant improvement in speech quality and naturalness. These deep learning algorithms, such as recurrent neural networks (RNNs) and its memory enhanced variations, have shown strong reconstruction ability from input linguistic features to acoustic features. However, the efficiency of these algorithms is limited for its sequential process in both training and inference. Recently, Transformer with superiority in parallelism is proposed to TTS. It employs the positional embedding instead of recurrent mechanism for position modeling and significantly boosts training speed. However, this approach lacks monotonic constraint and is deficient with issues like pronunciation skipping. Therefore, in this paper, we propose a monotonicity enhancing approach with the combining use of Stepwise Monotonic Attention (SMA) and multi-head attention for Transformer based TTS system. Experiments show the proposed approach can reduce bad cases from 53 of 500 sentences to 1, together with an improvement on MOS from 4.09 to 4.17 in the naturalness test.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Devang S. Ram Mohan|AUTHOR Devang S. Ram Mohan]]^^1^^, [[Raphael Lenain|AUTHOR Raphael Lenain]]^^2^^, [[Lorenzo Foglianti|AUTHOR Lorenzo Foglianti]]^^1^^, [[Tian Huey Teh|AUTHOR Tian Huey Teh]]^^1^^, [[Marlene Staib|AUTHOR Marlene Staib]]^^1^^, [[Alexandra Torresquintero|AUTHOR Alexandra Torresquintero]]^^1^^, [[Jiameng Gao|AUTHOR Jiameng Gao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Papercup Technologies, UK; ^^2^^Novoic, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3186–3190&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern approaches to text to speech require the entire input character sequence to be processed before any audio is synthesised. This latency limits the suitability of such models for time-sensitive tasks like simultaneous interpretation. Interleaving the action of reading a character with that of synthesising audio reduces this latency. However, the order of this sequence of interleaved actions varies across sentences, which raises the question of how the actions should be chosen. We propose a reinforcement learning based framework to train an agent to make this decision. We compare our performance against that of deterministic, rule-based systems. Our results demonstrate that our agent successfully balances the trade-off between the latency of audio generation and the quality of synthesised audio. More broadly, we show that neural sequence-to-sequence models can be adapted to run in an incremental manner.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tao Tu|AUTHOR Tao Tu]], [[Yuan-Jui Chen|AUTHOR Yuan-Jui Chen]], [[Alexander H. Liu|AUTHOR Alexander H. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3191–3195&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, end-to-end multi-speaker text-to-speech (TTS) systems gain success in the situation where a lot of high-quality speech plus their corresponding transcriptions are available. However, laborious paired data collection processes prevent many institutes from building multi-speaker TTS systems of great performance. In this work, we propose a semi-supervised learning approach for multi-speaker TTS. A multi-speaker TTS model can learn from the untranscribed audio via the proposed encoder-decoder framework with discrete speech representation. The experiment results demonstrate that with only an hour of paired speech data, whether the paired data is from multiple speakers or a single speaker, the proposed model can generate intelligible speech in different voices. We found the model can benefit from the proposed semi-supervised learning approach even when part of the unpaired speech data is noisy. In addition, our analysis reveals that different speaker characteristics of the paired data have an impact on the effectiveness of semi-supervised TTS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pramit Saha|AUTHOR Pramit Saha]], [[Sidney Fels|AUTHOR Sidney Fels]]
</p><p class="cpabstractcardaffiliationlist">University of British Columbia, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3196–3200&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The articulatory geometric configurations of the vocal tract and the acoustic properties of the resultant speech sound are considered to have a strong causal relationship. This paper aims at finding a joint latent representation between the articulatory and acoustic domain for vowel sounds via invertible neural network models, while simultaneously preserving the respective domain-specific features. Our model utilizes a convolutional autoencoder architecture and normalizing flow-based models to allow both forward and inverse mappings in a semi-supervised manner, between the mid-sagittal vocal tract geometry of a two degrees-of-freedom articulatory synthesizer with 1D acoustic wave model and the Mel-spectrogram representation of the synthesized speech sounds. Our approach achieves satisfactory performance in achieving both articulatory-to-acoustic as well as acoustic-to-articulatory mapping, thereby demonstrating our success in achieving a joint encoding of both the domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuki Yamashita|AUTHOR Yuki Yamashita]]^^1^^, [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]]^^1^^, [[Yuki Saito|AUTHOR Yuki Saito]]^^1^^, [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]]^^1^^, [[Yusuke Ijima|AUTHOR Yusuke Ijima]]^^2^^, [[Ryo Masumura|AUTHOR Ryo Masumura]]^^2^^, [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tokyo, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3201–3205&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we investigate the effectiveness of using rich annotations in deep neural network (DNN)-based statistical speech synthesis. General text-to-speech synthesis frameworks for reading-style speech use text-dependent information referred to as context. However, to achieve more human-like speech synthesis, we should take paralinguistic and nonlinguistic features into account. We focus on adding contextual features to the input features of DNN-based speech synthesis using spontaneous speech corpus with rich tags including paralinguistic and nonlinguistic features such as prosody, disfluency, and morphological features. Through experimental evaluations, we investigate the effectiveness of additional contextual factors and show which factors enhance the naturalness as spontaneous speech. This paper contributes as a guide to data collection for speech synthesis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weiwei Lin|AUTHOR Weiwei Lin]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]]
</p><p class="cpabstractcardaffiliationlist">PolyU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3211–3215&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition has seen impressive advances with the advent of deep neural networks (DNNs). However, state-of-the-art speaker recognition systems still rely on human engineering features such as mel-frequency cepstrum coefficients (MFCC). We believe that the handcrafted features limit the potential of the powerful representation of DNNs. Besides, there are also additional steps such as voice activity detection (VAD) and cepstral mean and variance normalization (CMVN) after computing the MFCC. In this paper, we show that MFCC, VAD, and CMVN can be replaced by the tools available in the standard deep learning toolboxes, such as a stacked of stride convolutions, temporal gating, and instance normalization. With these tools, we show that directly learning speaker embeddings from waveforms outperforms an x-vector network that uses MFCC or filter-bank output as features. We achieve an EER of 1.95% on the VoxCeleb1 test set using an end-to-end training scheme, which, to our best knowledge, is the best performance reported using raw waveforms. What’s more, the proposed method is complementary with x-vector systems. The fusion of the proposed method with x-vectors trained on filter-bank features produce an EER of 1.55%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jaejin Cho|AUTHOR Jaejin Cho]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3256–3260&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Zero-shot multi-speaker Text-to-Speech (TTS) generates target speaker voices given an input text and the corresponding speaker embedding. In this work, we investigate the effectiveness of the TTS reconstruction objective to improve representation learning for speaker verification. We jointly trained end-to-end Tacotron 2 TTS and speaker embedding networks in a self-supervised fashion. We hypothesize that the embeddings will contain minimal phonetic information since the TTS decoder will obtain that information from the textual input. TTS reconstruction can also be combined with speaker classification to enhance these embeddings further. Once trained, the speaker encoder computes representations for the speaker verification task, while the rest of the TTS blocks are discarded. We investigated training TTS from either manual or ASR-generated transcripts. The latter allows us to train embeddings on datasets without manual transcripts. We compared ASR transcripts and Kaldi phone alignments as TTS inputs, showing that the latter performed better due to their finer resolution. Unsupervised TTS embeddings improved EER by 2.06% absolute with regard to i-vectors for the LibriTTS dataset. TTS with speaker classification loss improved EER by 0.28% and 2.88% absolutely from a model using only speaker classification loss in LibriTTS and Voxceleb1 respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Minh Pham|AUTHOR Minh Pham]], [[Zeqian Li|AUTHOR Zeqian Li]], [[Jacob Whitehill|AUTHOR Jacob Whitehill]]
</p><p class="cpabstractcardaffiliationlist">Worcester Polytechnic Institute, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3216–3220&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A common assumption when collecting speech datasets is that the accuracy of data labels strongly influences the accuracy of speaker embedding models and verification systems trained from these data. However, we show in experiments¹ on the large and diverse VoxCeleb2 dataset that this is not always the case: Under four different labeling models (Split, Merge, Permute, and Corrupt), we find that the impact on trained speaker embedding models, as measured by the Equal Error Rate (EER) of speaker verification, is mild (just a few percent absolute error increase), except with very large amounts of noise (i.e., every minibatch is almost completely corrupted). This suggests that efforts to collect speech datasets might benefit more from ensuring large size and diversity rather than meticulous labeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xuechen Liu|AUTHOR Xuechen Liu]]^^1^^, [[Md. Sahidullah|AUTHOR Md. Sahidullah]]^^2^^, [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Eastern Finland, Finland; ^^2^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3221–3225&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern automatic speaker verification relies largely on deep neural networks (DNNs) trained on mel-frequency cepstral coefficient (MFCC) features. While there are alternative feature extraction methods based on phase, prosody and long-term temporal operations, they have not been extensively studied with DNN-based methods. We aim to fill this gap by providing extensive re-assessment of 14 feature extractors on VoxCeleb and SITW datasets. Our findings reveal that features equipped with techniques such as spectral centroids, group delay function, and integrated noise suppression provide promising alternatives to MFCCs for deep speaker embeddings extraction. Experimental results demonstrate up to 16.3% (VoxCeleb) and 25.1% (SITW) relative decrease in equal error rate (EER) to the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3226–3230&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we propose the global context guided channel and time-frequency transformations to model the long-range, non-local time-frequency dependencies and channel variances in speaker representations. We use the global context information to enhance important channels and recalibrate salient time-frequency locations by computing the similarity between the global context and local features. The proposed modules, together with a popular ResNet based model, are evaluated on the VoxCeleb1 dataset, which is a large scale speaker verification corpus collected in the wild. This lightweight block can be easily incorporated into a CNN model with little additional computational costs and effectively improves the speaker verification performance compared to the baseline ResNet-LDE model and the Squeeze&Excitation block by a large margin. Detailed ablation studies are also performed to analyze various factors that may impact the performance of the proposed modules. We find that by employing the proposed L2-tf-GTFC transformation block, the Equal Error Rate decreases from 4.56% to 3.07%, a relative 32.68% reduction, and a relative 27.28% improvement in terms of the DCF score. The results indicate that our proposed global context guided transformation modules can efficiently improve the learned speaker representations by achieving time-frequency and channel-wise feature recalibration.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yoohwan Kwon|AUTHOR Yoohwan Kwon]], [[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]
</p><p class="cpabstractcardaffiliationlist">Yonsei University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3231–3235&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose an effective training strategy to extract robust speaker representations from a speech signal. One of the key challenges in speaker recognition tasks is to learn latent representations or embeddings containing solely speaker characteristic information in order to be robust in terms of intra-speaker variations. By modifying the network architecture to generate both speaker-related and speaker-unrelated representations, we exploit a learning criterion which minimizes the mutual information between these disentangled embeddings. We also introduce an identity change loss criterion which utilizes a reconstruction error to different utterances spoken by the same speaker. Since the proposed criteria reduce the variation of speaker characteristics caused by changes in background environment or spoken content, the resulting embeddings of each speaker become more consistent. The effectiveness of the proposed method is demonstrated through two tasks; disentanglement performance, and improvement of speaker recognition accuracy compared to the baseline model on a benchmark dataset, VoxCeleb1. Ablation studies also show the impact of each criterion on overall performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Munir Georges|AUTHOR Munir Georges]]^^1^^, [[Jonathan Huang|AUTHOR Jonathan Huang]]^^2^^, [[Tobias Bocklet|AUTHOR Tobias Bocklet]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Intel, Germany; ^^2^^Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3236–3240&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks (DNN) have recently been widely used in speaker recognition systems, achieving state-of-the-art performance on various benchmarks. The x-vector architecture is especially popular in this research community, due to its excellent performance and manageable computational complexity. In this paper, we present the lrx-vector system, which is the low-rank factorized version of the x-vector embedding network. The primary objective of this topology is to further reduce the memory requirement of the speaker recognition system. We discuss the deployment of knowledge distillation for training the lrx-vector system and compare against low-rank factorization with SVD. On the VOiCES 2019 far-field corpus we were able to reduce the weights by 28% compared to the full-rank x-vector system while keeping the recognition rate constant (1.83% EER).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Florian L. Kreyssig|AUTHOR Florian L. Kreyssig]], [[Philip C. Woodland|AUTHOR Philip C. Woodland]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3241–3245&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a semi-supervised learning (SSL) technique for training deep neural networks (DNNs) to generate speaker-discriminative acoustic embeddings (speaker embeddings). Obtaining large amounts of speaker recognition training data can be difficult for desired target domains, especially under privacy constraints. The proposed technique reduces requirements for labelled data by leveraging unlabelled data. The technique is a variant of virtual adversarial training (VAT) [1] in the form of a loss that is defined as the robustness of the speaker embedding against input perturbations, as measured by the cosine-distance. Thus, we term the technique cosine-distance virtual adversarial training (CD-VAT). In comparison to many existing SSL techniques, the unlabelled data does not have to come from the same set of classes (here speakers) as the labelled data. The effectiveness of CD-VAT is shown on the 2750+ hour VoxCeleb data set, where on a speaker verification task it achieves a reduction in equal error rate (EER) of 11.1% relative to a purely supervised baseline. This is 32.5% of the improvement that would be achieved from supervised training if the speaker labels for the unlabelled data were available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Junyi Peng|AUTHOR Junyi Peng]]^^1^^, [[Rongzhi Gu|AUTHOR Rongzhi Gu]]^^2^^, [[Yuexian Zou|AUTHOR Yuexian Zou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3246–3250&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, speaker verification systems using deep neural networks have shown their effectiveness on large scale datasets. The widely used pairwise loss functions only consider the discrimination within a mini-batch data (short-term), while either the speaker identity information or the whole training dataset is not fully exploited. Thus, these pairwise comparisons may suffer from the interferences and variances brought by speaker-unrelated factors. To tackle this problem, we introduce the speaker identity information to form long-term speaker embedding centroids, which are determined by all the speakers in the training set. During the training process, each centroid dynamically accumulates the statistics of all samples belonging to a specific speaker. Since the long-term speaker embedding centroids are associated with a wide range of training samples, these centroids have the potential to be more robust and discriminative. Finally, these centroids are employed to construct a loss function, named long short term speaker loss (LSTSL). The proposed LSTSL constrains that the distances between samples and centroid from the same speaker are compact while those from different speakers are dispersed. Experiments are conducted on VoxCeleb1 and VoxCeleb2. Results on the VoxCeleb1 dataset demonstrate the effectiveness of our proposed LSTSL.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lantian Li|AUTHOR Lantian Li]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3251–3255&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Probabilistic Linear Discriminant Analysis (PLDA) is a popular tool in open-set classification/verification tasks. However, the Gaussian assumption underlying PLDA prevents it from being applied to situations where the data is clearly non-Gaussian. In this paper, we present a novel nonlinear version of PLDA named as Neural Discriminant Analysis (NDA). This model employs an invertible deep neural network to transform a complex distribution to a simple Gaussian, so that the linear Gaussian model can be readily established in the transformed space. We tested this NDA model on a speaker recognition task where the deep speaker vectors (x-vectors) are presumably non-Gaussian. Experimental results on two datasets demonstrate that NDA consistently outperforms PLDA, by handling the non-Gaussian distributions of the x-vectors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yan Zhao|AUTHOR Yan Zhao]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3261–3265&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Background noise and room reverberation are two major distortions to the speech signal in real-world environments. Each of them degrades speech intelligibility and quality, and their combined effects are especially detrimental. In this paper, we propose a DenseUNet based model for noisy-reverberant speech enhancement, where a novel time-frequency (T-F) attention mechanism is introduced to aggregate contextual information among different T-F units efficiently and a channelwise attention is developed to merge sources of information among different feature maps. In addition, we introduce a normalization-activation strategy to alleviate the performance drop for small batch training. Systematic evaluations demonstrate that the proposed algorithm substantially improves objective speech intelligibility and quality in various noisy-reverberant conditions, and outperforms other related methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]]^^1^^, [[Chengyun Deng|AUTHOR Chengyun Deng]]^^2^^, [[Yi Shen|AUTHOR Yi Shen]]^^1^^, [[Donald S. Williamson|AUTHOR Donald S. Williamson]]^^1^^, [[Yongtao Sha|AUTHOR Yongtao Sha]]^^2^^, [[Yi Zhang|AUTHOR Yi Zhang]]^^2^^, [[Hui Song|AUTHOR Hui Song]]^^2^^, [[Xiangang Li|AUTHOR Xiangang Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indiana University, USA; ^^2^^DiDi Chuxing, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3266–3270&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent work has shown that it is feasible to use generative adversarial networks (GANs) for speech enhancement, however, these approaches have not been compared to state-of-the-art (SOTA) non GAN-based approaches. Additionally, many loss functions have been proposed for GAN-based approaches, but they have not been adequately compared. In this study, we propose novel convolutional recurrent GAN (CRGAN) architectures for speech enhancement. Multiple loss functions are adopted to enable direct comparisons to other GAN-based systems. The benefits of including recurrent layers are also explored. Our results show that the proposed CRGAN model outperforms the SOTA GAN-based models using the same loss functions and it outperforms other non-GAN based systems, indicating the benefits of using a GAN for speech enhancement. Overall, the CRGAN model that combines an objective metric loss function with the mean squared error (MSE) provides the best performance over comparison approaches across many evaluation metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhihao Du|AUTHOR Zhihao Du]]^^1^^, [[Ming Lei|AUTHOR Ming Lei]]^^2^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^1^^, [[Shiliang Zhang|AUTHOR Shiliang Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harbin Institute of Technology, China; ^^2^^Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3271–3275&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In our previous study, we introduce the neural vocoder into monaural speech enhancement, in which a flow-based generative vocoder is used to synthesize speech waveforms from the Mel power spectra enhanced by a denoising autoencoder. As a result, this vocoder-based enhancement method outperforms several state-of-the-art models on a speaker-dependent dataset. However, we find that there is a big gap between the enhancement performance on the trained and untrained noises. Therefore, in this paper, we propose the self-supervised adversarial multi-task learning (SAMLE) to improve the noise generalization ability. In addition, the speaker dependence is also evaluated for the vocoder-based methods, which is important for real-life applications. Experimental results show that the proposed SAMLE further improves the enhancement performance on both trained and untrained noises, resulting in a better noise generalization ability. Moreover, we find that vocoder-based enhancement methods can be speaker-independent through a large-scale training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mikolaj Kegler|AUTHOR Mikolaj Kegler]], [[Pierre Beckmann|AUTHOR Pierre Beckmann]], [[Milos Cernak|AUTHOR Milos Cernak]]
</p><p class="cpabstractcardaffiliationlist">Logitech, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3276–3280&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transient loud intrusions, often occurring in noisy environments, can completely overpower speech signal and lead to an inevitable loss of information. While existing algorithms for noise suppression can yield impressive results, their efficacy remains limited for very low signal-to-noise ratios or when parts of the signal are missing. To address these limitations, here we propose an end-to-end framework for speech inpainting, the context-based retrieval of missing or severely distorted parts of time-frequency representation of speech. The framework is based on a convolutional U-Net trained via deep feature losses, obtained using speechVGG, a deep speech feature extractor pre-trained on an auxiliary word classification task. Our evaluation results demonstrate that the proposed framework can recover large portions of missing or distorted time-frequency representation of speech, up to 400 ms and 3.2 kHz in bandwidth. In particular, our approach provided a substantial increase in STOI & PESQ objective metrics of the initially corrupted speech samples. Notably, using deep feature losses to train the framework led to the best results, as compared to conventional approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nikhil Shankar|AUTHOR Nikhil Shankar]], [[Gautam Shreedhar Bhat|AUTHOR Gautam Shreedhar Bhat]], [[Issa M.S. Panahi|AUTHOR Issa M.S. Panahi]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3281–3285&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a deep neural network architecture comprising of both convolutional neural network (CNN) and recurrent neural network (RNN) layers for real-time single-channel speech enhancement (SE). The proposed neural network model focuses on enhancing the noisy speech magnitude spectrum on a frame-by-frame process. The developed model is implemented on the smartphone (edge device), to demonstrate the real-time usability of the proposed method. Perceptual evaluation of speech quality (PESQ) and short-time objective intelligibility (STOI) test results are used to compare the proposed algorithm to previously published conventional and deep learning-based SE methods. Subjective ratings show the performance improvement of the proposed model over the other baseline SE methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ju Lin|AUTHOR Ju Lin]]^^1^^, [[Sufeng Niu|AUTHOR Sufeng Niu]]^^2^^, [[Adriaan J. van Wijngaarden|AUTHOR Adriaan J. van Wijngaarden]]^^3^^, [[Jerome L. McClendon|AUTHOR Jerome L. McClendon]]^^1^^, [[Melissa C. Smith|AUTHOR Melissa C. Smith]]^^1^^, [[Kuang-Ching Wang|AUTHOR Kuang-Ching Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Clemson University, USA; ^^2^^LinkedIn, USA; ^^3^^Nokia Bell Labs, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3286–3290&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement is an essential component in robust automatic speech recognition (ASR) systems. Most speech enhancement methods are nowadays based on neural networks that use feature-mapping or mask-learning. This paper proposes a novel speech enhancement method that integrates time-domain feature mapping and mask learning into a unified framework using a Generative Adversarial Network (GAN). The proposed framework processes the received waveform and decouples speech and noise signals, which are fed into two short-time Fourier transform (STFT) convolution 1-D layers that map the waveforms to spectrograms in the complex domain. These speech and noise spectrograms are then used to compute the speech mask loss. The proposed method is evaluated using the TIMIT data set for seen and unseen signal-to-noise ratio conditions. It is shown that the proposed method outperforms the speech enhancement methods that use Deep Neural Network (DNN) based speech enhancement or a Speech Enhancement Generative Adversarial Network (SEGAN).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandre Défossez|AUTHOR Alexandre Défossez]]^^1^^, [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]]^^2^^, [[Yossi Adi|AUTHOR Yossi Adi]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Facebook, France; ^^2^^Facebook, USA; ^^3^^Facebook, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3291–3295&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a causal speech enhancement model working on the raw waveform that runs in real-time on a laptop CPU. The proposed model is based on an encoder-decoder architecture with skip-connections. It is optimized on both time and frequency domains, using multiple loss functions. Empirical evidence shows that it is capable of removing various kinds of background noise including stationary and non-stationary noises, as well as room reverb. Additionally, we suggest a set of data augmentation techniques applied directly on the raw waveform which further improve model performance and its generalization abilities. We perform evaluations on several standard benchmarks, both using objective metrics and human judgements. The proposed model matches state-of-the-art performance of both causal and non causal methods while working directly on the raw waveform.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michal Romaniuk|AUTHOR Michal Romaniuk]], [[Piotr Masztalski|AUTHOR Piotr Masztalski]], [[Karol Piaskowski|AUTHOR Karol Piaskowski]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3296–3300&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose Mobile Audio Streaming Networks (MASnet) for efficient low-latency speech enhancement, which is particularly suitable for mobile devices and other applications where computational capacity is a limitation. MASnet processes linear-scale spectrograms, transforming successive noisy frames into complex-valued ratio masks which are then applied to the respective noisy frames. MASnet can operate in a low-latency incremental inference mode which matches the complexity of layer-by-layer batch mode. Compared to a similar fully-convolutional architecture, MASnet incorporates depthwise and pointwise convolutions for a large reduction in fused multiply-accumulate operations per second (FMA/s), at the cost of some reduction in SNR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuya Chiba|AUTHOR Yuya Chiba]], [[Takashi Nose|AUTHOR Takashi Nose]], [[Akinori Ito|AUTHOR Akinori Ito]]
</p><p class="cpabstractcardaffiliationlist">Tohoku University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3301–3305&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a speech emotion recognition technique that considers the suprasegmental characteristics and temporal change of individual speech parameters. In recent years, speech emotion recognition using Bidirectional LSTM (BLSTM) has been studied actively because the model can focus on a particular temporal region that contains strong emotional characteristics. One of the model’s weaknesses is that it cannot consider the statistics of speech features, which are known to be effective for speech emotion recognition. Besides, this method cannot train individual attention parameters for different descriptors because it handles the input sequence by a single BLSTM. In this paper, we introduce feature segmentation and multi-stream processing into attention-based BLSTM to solve these problems. In addition, we employed data augmentation based on emotional speech synthesis in a training step. The classification experiments between four emotions (i.e., anger, joy, neutral, and sadness) using the Japanese Twitter-based Emotional Speech corpus (JTES) showed that the proposed method obtained a recognition accuracy of 73.4%, which is comparable to human evaluation (75.5%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guanjun Li|AUTHOR Guanjun Li]]^^1^^, [[Shan Liang|AUTHOR Shan Liang]]^^1^^, [[Shuai Nie|AUTHOR Shuai Nie]]^^1^^, [[Wenju Liu|AUTHOR Wenju Liu]]^^1^^, [[Zhanlei Yang|AUTHOR Zhanlei Yang]]^^2^^, [[Longshuai Xiao|AUTHOR Longshuai Xiao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CAS, China; ^^2^^Huawei Technologies, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3306–3310&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The post-filter for microphone array speech enhancement can effectively suppress noise including point interferers. However, the suppression of point interferers relies on the accurate estimation of the number and directions of point interferers, which is a difficult task in practical situations. In this paper, we propose a post-filtering algorithm, which is independent of the number and directions of point interferers. Specifically, we assume that the point interferers are continuously distributed at each direction of the plane but the probability of the interferer occurring at each direction is different in order to calculate the spatial covariance matrix of the point interferers. Moreover, we assume that the noise is additive and uncorrelated with the target signal to obtain the power spectral densities (PSDs) of the target signal and noise. Finally, the proposed post-filter is calculated using the estimated PSDs. Experimental results prove that the proposed post-filtering algorithm is superior to the comparative algorithms in the scenarios where the number and directions of point interferers are not accurately estimated.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Atsuo Hiroe|AUTHOR Atsuo Hiroe]]
</p><p class="cpabstractcardaffiliationlist">Sony, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3311–3315&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study presents a novel method for source extraction, referred to as the similarity-and-independence-aware beamformer (SIBF). The SIBF extracts the target signal using a rough magnitude spectrogram as the reference signal. The advantage of the SIBF is that it can obtain an accurate target signal, compared to the spectrogram generated by target-enhancing methods such as the speech enhancement based on deep neural networks (DNNs). For the extraction, we extend the framework of the deflationary independent component analysis, by considering the similarity between the reference and extracted target, as well as the mutual independence of all potential sources. To solve the extraction problem by maximum-likelihood estimation, we introduce two source model types that can reflect the similarity. The experimental results from the CHiME3 dataset show that the target signal extracted by the SIBF is more accurate than the reference signal generated by the DNN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oleg Golokolenko|AUTHOR Oleg Golokolenko]], [[Gerald Schuller|AUTHOR Gerald Schuller]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Ilmenau, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3316–3320&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a novel fast time domain audio source separation technique based on fractional delay filters with low computational complexity and small algorithmic delay is presented and evaluated in experiments. Our goal is a Blind Source Separation (BSS) technique, which can be applicable for the low cost and low power devices where processing is done in real-time, e.g. hearing aids or teleconferencing setups. The proposed approach optimizes fractional delays implemented as IIR filters and attenuation factors between microphone signals to minimize crosstalk, the principle of a fractional delay and sum beamformer. The experiments have been carried out for offline separation with stationary sound sources and for real-time with randomly moving sound sources. Experimental results show that separation performance of the proposed time domain BSS technique is competitive with State-of-the-Art (SoA) approaches but has lower computational complexity and no system delay like in frequency domain BSS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3321–3325&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-channel deep clustering (MDC) has acquired a good performance for speech separation. However, MDC only applies the spatial features as the additional information, which does not fuse them with the spectral features very well. So it is difficult to learn mutual relationship between spatial and spectral features. Besides, the training objective of MDC is defined at embedding vectors, rather than real separated sources, which may damage the separation performance. In this work, we deal with spatial and spectral features as two different modalities. We propose the gated recurrent fusion (GRF) method to adaptively select and fuse the relevant information from spectral and spatial features by making use of the gate and memory modules. In addition, to solve the training objective problem of MDC, the real separated sources are used as the training objectives. Specifically, we apply the deep clustering network to extract deep embedding features. Instead of using the unsupervised K-means clustering to estimate binary masks, another supervised network is utilized to learn soft masks from these deep embedding features. Our experiments are conducted on a spatialized reverberant version of WSJ0-2mix dataset. Experimental results show that the proposed method outperforms MDC baseline and even better than the oracle ideal binary mask (IBM).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Robin Scheibler|AUTHOR Robin Scheibler]]
</p><p class="cpabstractcardaffiliationlist">LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3326–3330&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We revisit the source image estimation problem from blind source separation (BSS). We generalize the traditional minimum distortion principle to maximum likelihood estimation with a model for the residual spectrograms. Because residual spectrograms typically contain other sources, we propose to use a mixed-norm model that lets us finely tune sparsity in time and frequency. We propose to carry out the minimization of the mixed-norm via majorization-minimization optimization, leading to an iteratively reweighted least-squares algorithm. The algorithm balances well efficiency and ease of implementation. We assess the performance of the proposed method as applied to two well-known determined BSS and one joint BSS-dereverberation algorithms. We find out that it is possible to tune the parameters to improve separation by up to 2 dB, with no increase in distortion, and at little computational cost. The method thus provides a cheap and easy way to boost the performance of blind source separation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ying Zhong|AUTHOR Ying Zhong]], [[Ying Hu|AUTHOR Ying Hu]], [[Hao Huang|AUTHOR Hao Huang]], [[Wushour Silamu|AUTHOR Wushour Silamu]]
</p><p class="cpabstractcardaffiliationlist">Xinjiang University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3331–3335&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the major challenges in Speech Emotion Recognition (SER) is to build a lightweight model with limited training data. In this paper, we propose a lightweight architecture with only fewer parameters which is based on separable convolution and inverted residuals. Speech samples are often annotated by multiple raters. While some sentences with clear emotional content are consistently annotated (easy samples), sentences with ambiguous emotional content present important disagreement between individual evaluations (hard samples). We assumed that samples hard for humans are also hard for computers. We address the problem by using focal loss, which focus on learning hard samples and down-weight easy samples. By combining attention mechanism, our proposed network can enhance the importing of emotion-salient information. Our proposed model achieves 71.72% and 90.1% of unweighted accuracy (UA) on the well-known corpora IEMOCAP and Emo-DB respectively. Comparing with the current model having fewest parameters as we know, its model size is almost 5 times of our proposed model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruichu Cai|AUTHOR Ruichu Cai]]^^1^^, [[Kaibin Guo|AUTHOR Kaibin Guo]]^^1^^, [[Boyan Xu|AUTHOR Boyan Xu]]^^1^^, [[Xiaoyan Yang|AUTHOR Xiaoyan Yang]]^^2^^, [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^GDUT, China; ^^2^^YITU Technology, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3336–3340&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most existing Speech Emotion Recognition (SER) approaches ignore the relationship between the categorical emotional labels and the dimensional labels in valence, activation or dominance space. Although multi-task learning has recently been introduced to explore such auxiliary tasks of SER, existing approaches only share the feature extractor under the traditional multi-task learning framework and can not efficiently transfer the knowledge from the auxiliary tasks to the target task. In order to address these issues, we propose a Meta Multi-task Learning method for SER by combining the multi-task learning with meta learning. Our contributions include: 1) to model the relationship among auxiliary tasks, we extend the task generation of meta learning to the form of multiple tasks, and 2) to transfer the knowledge from the auxiliary tasks to the target task, we propose a tuning-based transfer training mechanism in the meta learning framework. The experiments on IEMOCAP show that our approach outperforms the state-of-the-art solution (UA: 70.32%, WA: 76.64%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[François Grondin|AUTHOR François Grondin]], [[Jean-Samuel Lauzon|AUTHOR Jean-Samuel Lauzon]], [[Jonathan Vincent|AUTHOR Jonathan Vincent]], [[François Michaud|AUTHOR François Michaud]]
</p><p class="cpabstractcardaffiliationlist">Université de Sherbrooke, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3341–3345&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Distant speech processing is a challenging task, especially when dealing with the cocktail party effect. Sound source separation is thus often required as a preprocessing step prior to speech recognition to improve the signal to distortion ratio (SDR). Recently, a combination of beamforming and speech separation networks have been proposed to improve the target source quality in the direction of arrival of interest. However, with this type of approach, the neural network needs to be trained in advance for a specific microphone array geometry, which limits versatility when adding/removing microphones, or changing the shape of the array. The solution presented in this paper is to train a neural network on pairs of microphones with different spacing and acoustic environmental conditions, and then use this network to estimate a time-frequency mask from all the pairs of microphones forming the array with an arbitrary shape. Using this mask, the target and noise covariance matrices can be estimated, and then used to perform generalized eigenvalue (GEV) beamforming. Results show that the proposed approach improves the SDR from 4.78 dB to 7.69 dB on average, for various microphone array geometries that correspond to commercially available hardware.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christin Jose|AUTHOR Christin Jose]], [[Yuriy Mishchenko|AUTHOR Yuriy Mishchenko]], [[Thibaud Sénéchal|AUTHOR Thibaud Sénéchal]], [[Anish Shah|AUTHOR Anish Shah]], [[Alex Escott|AUTHOR Alex Escott]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3346–3350&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Small footprint embedded devices require keyword spotters (KWS) with small model size and detection latency for enabling voice assistants. Such a keyword is often referred to as //wake word// as it is used to wake up voice assistant enabled devices. Together with wake word detection, accurate estimation of wake word endpoints (start and end) is an important task of KWS. In this paper, we propose two new methods for detecting the endpoints of wake words in neural KWS that use single-stage word-level neural networks. Our results show that the new techniques give superior accuracy for detecting wake words’ endpoints of up to 50 msec standard error versus human annotations, on par with the conventional Acoustic Model plus HMM forced alignment. To our knowledge, this is the first study of wake word endpoints detection methods for single-stage neural KWS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saurabh Adya|AUTHOR Saurabh Adya]]^^1^^, [[Vineet Garg|AUTHOR Vineet Garg]]^^1^^, [[Siddharth Sigtia|AUTHOR Siddharth Sigtia]]^^2^^, [[Pramod Simha|AUTHOR Pramod Simha]]^^1^^, [[Chandra Dhir|AUTHOR Chandra Dhir]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Apple, USA; ^^2^^Apple, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3351–3355&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the design of two-pass voice trigger detection systems. We focus on the networks in the second pass that are used to re-score candidate segments obtained from the first-pass. Our baseline is an acoustic model(AM), with BiLSTM layers, trained by minimizing the CTC loss. We replace the BiLSTM layers with self-attention layers. Results on internal evaluation sets show that self-attention networks yield better accuracy while requiring fewer parameters. We add an auto-regressive decoder network on top of the self-attention layers and jointly minimize the CTC loss on the encoder and the cross-entropy loss on the decoder. This design yields further improvements over the baseline. We retrain all the models above in a multi-task learning(MTL) setting, where one branch of a shared network is trained as an AM, while the second branch classifies the whole sequence to be true-trigger or not. Results demonstrate that networks with self-attention layers yield ~60% relative reduction in false reject rates for a given false-alarm rate, while requiring 10% fewer parameters. When trained in the MTL setup, self-attention networks yield further accuracy improvements. On-device measurements show that we observe 70% relative reduction in inference time. Additionally, the proposed network architectures are ~5× faster to train.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Somshubra Majumdar|AUTHOR Somshubra Majumdar]], [[Boris Ginsburg|AUTHOR Boris Ginsburg]]
</p><p class="cpabstractcardaffiliationlist">NVIDIA, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3356–3360&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present //MatchboxNet// — an end-to-end neural network for speech command recognition. MatchboxNet is a deep residual network composed from blocks of 1D time-channel separable convolution, batch-normalization, ReLU and dropout layers. MatchboxNet reaches state-of-the art accuracy on the Google Speech Commands dataset while having significantly fewer parameters than similar models. The small footprint of MatchboxNet makes it an attractive candidate for devices with limited computational resources. The model is highly scalable, so model accuracy can be improved with modest additional memory and compute. Finally, we show how intensive data augmentation using an auxiliary noise dataset improves robustness in the presence of background noise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhinav Mehrotra|AUTHOR Abhinav Mehrotra]]^^1^^, [[Łukasz Dudziak|AUTHOR Łukasz Dudziak]]^^1^^, [[Jinsu Yeo|AUTHOR Jinsu Yeo]]^^2^^, [[Young-yoon Lee|AUTHOR Young-yoon Lee]]^^2^^, [[Ravichander Vipperla|AUTHOR Ravichander Vipperla]]^^1^^, [[Mohamed S. Abdelfattah|AUTHOR Mohamed S. Abdelfattah]]^^1^^, [[Sourav Bhattacharya|AUTHOR Sourav Bhattacharya]]^^1^^, [[Samin Ishtiaq|AUTHOR Samin Ishtiaq]]^^1^^, [[Alberto Gil C.P. Ramos|AUTHOR Alberto Gil C.P. Ramos]]^^1^^, [[SangJeong Lee|AUTHOR SangJeong Lee]]^^2^^, [[Daehyun Kim|AUTHOR Daehyun Kim]]^^2^^, [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, UK; ^^2^^Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3361–3365&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Increasing demand for on-device Automatic Speech Recognition (ASR) systems has resulted in renewed interests in developing automatic model compression techniques. Past research have shown that AutoML-based Low Rank Factorization (LRF) technique, when applied to an end-to-end Encoder-Attention-Decoder style ASR model, can achieve a speedup of up to 3.7×, outperforming laborious manual rank-selection approaches. However, we show that current AutoML-based search techniques only work up to a certain compression level, beyond which they fail to produce compressed models with acceptable word error rates (WER). In this work, we propose an iterative AutoML-based LRF approach that achieves over 5× compression without degrading the WER, thereby advancing the state-of-the-art in ASR compression.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hieu Duy Nguyen|AUTHOR Hieu Duy Nguyen]], [[Anastasios Alexandridis|AUTHOR Anastasios Alexandridis]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3366–3370&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Compression and quantization is important to neural networks in general and Automatic Speech Recognition (ASR) systems in particular, especially when they operate in real-time on resource-constrained devices. By using fewer number of bits for the model weights, the model size becomes much smaller while inference time is reduced significantly, with the cost of degraded performance. Such degradation can be potentially addressed by the so-called quantization-aware training (QAT). Existing QATs mostly take into account the quantization in forward propagation, while ignoring the quantization loss in gradient calculation during back-propagation. In this work, we introduce a novel QAT scheme based on absolute-cosine regularization (ACosR), which enforces a prior, quantization-friendly distribution to the model weights. We apply this novel approach into ASR task assuming a recurrent neural network transducer (RNN-T) architecture. The results show that there is zero to little degradation between floating-point, 8-bit, and 6-bit ACosR models. Weight distributions further confirm that in-training weights are very close to quantization levels when ACosR is applied.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhinav Garg|AUTHOR Abhinav Garg]]^^1^^, [[Gowtham P. Vadisetti|AUTHOR Gowtham P. Vadisetti]]^^2^^, [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]]^^1^^, [[Sichen Jin|AUTHOR Sichen Jin]]^^1^^, [[Aditya Jayasimha|AUTHOR Aditya Jayasimha]]^^1^^, [[Youngho Han|AUTHOR Youngho Han]]^^1^^, [[Jiyeon Kim|AUTHOR Jiyeon Kim]]^^1^^, [[Junmo Park|AUTHOR Junmo Park]]^^1^^, [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]]^^1^^, [[Sooyeon Kim|AUTHOR Sooyeon Kim]]^^1^^, [[Young-yoon Lee|AUTHOR Young-yoon Lee]]^^1^^, [[Kyungbo Min|AUTHOR Kyungbo Min]]^^1^^, [[Chanwoo Kim|AUTHOR Chanwoo Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, Korea; ^^2^^Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3371–3375&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present our streaming on-device end-to-end speech recognition solution for a privacy sensitive voice-typing application which primarily involves typing user private details and passwords. We highlight challenges specific to voice-typing scenario in the Korean language and propose solutions to these problems within the framework of a streaming attention-based speech recognition system. Some important challenges in voice-typing are the choice of output units, coupling of multiple characters into longer byte-pair encoded units, lack of sufficient training data. Apart from customizing a high accuracy open domain streaming speech recognition model for voice-typing applications, we retain the performance of the model for open domain tasks without significant degradation. We also explore domain biasing using a shallow fusion with a weighted finite state transducer (WFST). We obtain approximately 13% relative word error rate (WER) improvement on our internal Korean voice-typing dataset without a WFST and about 30% additional WER improvement with a WFST fusion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Jacob Kahn|AUTHOR Jacob Kahn]], [[Gilad Avidov|AUTHOR Gilad Avidov]], [[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3376–3380&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We design an online end-to-end speech recognition system based on Time-Depth Separable (TDS) convolutions and Connectionist Temporal Classification (CTC). We improve the core TDS architecture in order to limit the future context and hence reduce latency while maintaining accuracy. The system has almost three times the throughput of a well tuned hybrid ASR baseline while also having lower latency and a better word error rate. Also important to the efficiency of the recognizer is our highly optimized beam search decoder. To show the impact of our design choices, we analyze throughput, latency, accuracy, and discuss how these metrics can be tuned based on the user requirements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ye Bai|AUTHOR Ye Bai]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Shuai Zhang|AUTHOR Shuai Zhang]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3381–3385&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although attention based end-to-end models have achieved promising performance in speech recognition, the multi-pass forward computation in beam-search increases inference time cost, which limits their practical applications. To address this issue, we propose a non-autoregressive end-to-end speech recognition system called LASO (listen attentively, and spell once). Because of the non-autoregressive property, LASO predicts a textual token in the sequence without the dependence on other tokens. Without beam-search, the one-pass propagation much reduces inference time cost of LASO. And because the model is based on the attention based feedforward structure, the computation can be implemented in parallel efficiently. We conduct experiments on publicly available Chinese dataset AISHELL-1. LASO achieves a character error rate of 6.4%, which outperforms the state-of-the-art autoregressive transformer model (6.7%). The average inference latency is 21 ms, which is 1/50 of the autoregressive transformer model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Grant P. Strimel|AUTHOR Grant P. Strimel]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]], [[Gautam Tiwari|AUTHOR Gautam Tiwari]], [[Adrien Piérard|AUTHOR Adrien Piérard]], [[Jon Webb|AUTHOR Jon Webb]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3386–3390&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce DashHashLM, an efficient data structure that stores an n-gram language model compactly while making minimal trade-offs on runtime lookup latency. The data structure implements a finite state transducer with a lossless structural compression and outperforms comparable implementations when considering lookup speed in the small-footprint setting. DashHashLM introduces several optimizations to language model compression which are designed to minimize expected memory accesses. We also present variations of DashHashLM appropriate for scenarios with different memory and latency constraints. We detail the algorithm and justify our design choices with comparative experiments on a speech recognition task. Specifically, we show that with roughly a 10% increase in memory size, compared to a highly optimized, compressed baseline n-gram representation, our proposed data structure can achieve up to a 6× query speedup.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaofei Li|AUTHOR Xiaofei Li]]^^1^^, [[Radu Horaud|AUTHOR Radu Horaud]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Westlake University, China; ^^2^^Inria, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2462–2466&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a delayed subband LSTM network for online monaural (single-channel) speech enhancement. The proposed method is developed in the short time Fourier transform (STFT) domain. Online processing requires frame-by-frame signal reception and processing. A paramount feature of the proposed method is that the same LSTM is used across frequencies, which drastically reduces the number of network parameters, the amount of training data and the computational burden. Training is performed in a subband manner: the input consists of a frequency together with a few context frequencies. The network learns a speech-to-noise discriminative function relying on the signal stationarity and on the local spectral pattern, based on which it predicts a clean-speech mask at each frequency. To exploit future information, i.e. a look-ahead strategy, we propose an output-delayed subband LSTM network, which allows the unidirectional forward network to use a few future frames to process the current frame. We leverage the proposed method to participate to the DNS real-time speech enhancement challenge. Experiments with the DNS dataset show that the proposed method achieves better performance-measuring scores than the DNS baseline method, which learns the full-band spectra using a gated recurrent unit network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maximilian Strake|AUTHOR Maximilian Strake]]^^1^^, [[Bruno Defraene|AUTHOR Bruno Defraene]]^^2^^, [[Kristoff Fluyt|AUTHOR Kristoff Fluyt]]^^2^^, [[Wouter Tirry|AUTHOR Wouter Tirry]]^^2^^, [[Tim Fingscheidt|AUTHOR Tim Fingscheidt]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Braunschweig, Germany; ^^2^^Goodix Technology, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2467–2471&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Interspeech 2020 Deep Noise Suppression (DNS) Challenge focuses on evaluating low-latency single-channel speech enhancement algorithms under realistic test conditions. Our contribution to the challenge is a method for joint dereverberation and denoising based on complex spectral mask estimation using a fully convolutional recurrent network (FCRN) which relies on a convolutional LSTM layer for temporal modeling. Since the effects of reverberation and noise on perceived speech quality can differ notably, a multi-target loss for controlling the weight on desired dereverberation and denoising is proposed. In the crowdsourced subjective P.808 listening test conducted by the DNS Challenge organizers, the proposed method shows a significant overall improvement of 0.43 MOS points over the DNS Challenge baseline and ranks amongst the top-3 submissions for both realtime and non-realtime tracks of the challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanxin Hu|AUTHOR Yanxin Hu]]^^1^^, [[Yun Liu|AUTHOR Yun Liu]]^^2^^, [[Shubo Lv|AUTHOR Shubo Lv]]^^1^^, [[Mengtao Xing|AUTHOR Mengtao Xing]]^^1^^, [[Shimin Zhang|AUTHOR Shimin Zhang]]^^1^^, [[Yihui Fu|AUTHOR Yihui Fu]]^^1^^, [[Jian Wu|AUTHOR Jian Wu]]^^1^^, [[Bihong Zhang|AUTHOR Bihong Zhang]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Sogou, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2472–2476&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement has benefited from the success of deep learning in terms of intelligibility and perceptual quality. Conventional time-frequency (TF) domain methods focus on predicting TF-masks or speech spectrum, via a naive convolution neural network (CNN) or recurrent neural network (RNN). Some recent studies use complex-valued spectrogram as a training target but train in a real-valued network, predicting the magnitude and phase component or real and imaginary part, respectively. Particularly, convolution recurrent network (CRN) integrates a convolutional encoder-decoder (CED) structure and long short-term memory (LSTM), which has been proven to be helpful for complex targets. In order to train the complex target more effectively, in this paper, we design a new network structure simulating the complex-valued operation, called Deep Complex Convolution Recurrent Network (DCCRN), where both CNN and RNN structures can handle complex-valued operation. The proposed DCCRN models are very competitive over other previous networks, either on objective or subjective metric. With only 3.7M parameters, our DCCRN models submitted to the Interspeech 2020 Deep Noise Suppression (DNS) challenge ranked first for the real-time-track and second for the non-real-time track in terms of Mean Opinion Score (MOS).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nils L. Westhausen|AUTHOR Nils L. Westhausen]], [[Bernd T. Meyer|AUTHOR Bernd T. Meyer]]
</p><p class="cpabstractcardaffiliationlist">Carl von Ossietzky Universität Oldenburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2477–2481&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a dual-signal transformation LSTM network (DTLN) for real-time speech enhancement as part of the Deep Noise Suppression Challenge (DNS-Challenge). This approach combines a short-time Fourier transform (STFT) and a learned analysis and synthesis basis in a stacked-network approach with less than one million parameters. The model was trained on 500 h of noisy speech provided by the challenge organizers. The network is capable of real-time processing (one frame in, one frame out) and reaches competitive results. Combining these two types of signal transformations enables the DTLN to robustly extract information from magnitude spectra and incorporate phase information from the learned feature basis. The method shows state-of-the-art performance and outperforms the DNS-Challenge baseline by 0.24 points absolute in terms of the mean opinion score (MOS).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jean-Marc Valin|AUTHOR Jean-Marc Valin]]^^1^^, [[Umut Isik|AUTHOR Umut Isik]]^^2^^, [[Neerad Phansalkar|AUTHOR Neerad Phansalkar]]^^2^^, [[Ritwik Giri|AUTHOR Ritwik Giri]]^^2^^, [[Karim Helwani|AUTHOR Karim Helwani]]^^2^^, [[Arvindh Krishnaswamy|AUTHOR Arvindh Krishnaswamy]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, Canada; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2482–2486&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Over the past few years, speech enhancement methods based on deep learning have greatly surpassed traditional methods based on spectral subtraction and spectral estimation. Many of these new techniques operate directly in the the short-time Fourier transform (STFT) domain, resulting in a high computational complexity. In this work, we propose PercepNet, an efficient approach that relies on human perception of speech by focusing on the spectral envelope and on the periodicity of the speech. We demonstrate high-quality, real-time enhancement of fullband (48 kHz) speech with less than 5% of a CPU core.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Umut Isik|AUTHOR Umut Isik]]^^1^^, [[Ritwik Giri|AUTHOR Ritwik Giri]]^^1^^, [[Neerad Phansalkar|AUTHOR Neerad Phansalkar]]^^1^^, [[Jean-Marc Valin|AUTHOR Jean-Marc Valin]]^^2^^, [[Karim Helwani|AUTHOR Karim Helwani]]^^1^^, [[Arvindh Krishnaswamy|AUTHOR Arvindh Krishnaswamy]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Amazon, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2487–2491&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural network applications generally benefit from larger-sized models, but for current speech enhancement models, larger scale networks often suffer from decreased robustness to the variety of real-world use cases beyond what is encountered in training data. We introduce several innovations that lead to better large neural networks for speech enhancement. The novel PoCoNet architecture is a convolutional neural network that, with the use of frequency-positional embeddings, is able to more efficiently build frequency-dependent features in the early layers. A semi-supervised method helps increase the amount of conversational training data by pre-enhancing noisy datasets, improving performance on real recordings. A new loss function biased towards preserving speech quality helps the optimization better match human perceptual opinions on speech quality. Ablation experiments and objective and human opinion metrics show the benefits of the proposed improvements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Vishak Gopal|AUTHOR Vishak Gopal]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Ebrahim Beyrami|AUTHOR Ebrahim Beyrami]], [[Roger Cheng|AUTHOR Roger Cheng]], [[Harishchandra Dubey|AUTHOR Harishchandra Dubey]], [[Sergiy Matusevych|AUTHOR Sergiy Matusevych]], [[Robert Aichner|AUTHOR Robert Aichner]], [[Ashkan Aazami|AUTHOR Ashkan Aazami]], [[Sebastian Braun|AUTHOR Sebastian Braun]], [[Puneet Rana|AUTHOR Puneet Rana]], [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2492–2496&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2020 Deep Noise Suppression (DNS) Challenge is intended to promote collaborative research in real-time single-channel Speech Enhancement aimed to maximize the subjective (perceptual) quality of the enhanced speech. A typical approach to evaluate the noise suppression methods is to use objective metrics on the test set obtained by splitting the original dataset. While the performance is good on the synthetic test set, often the model performance degrades significantly on real recordings. Also, most of the conventional objective metrics do not correlate well with subjective tests and lab subjective tests are not scalable for a large test set. In this challenge, we open-sourced a large clean speech and noise corpus for training the noise suppression models and a representative test set to real-world scenarios consisting of both synthetic and real recordings. We also open-sourced an online subjective test framework based on ITU-T P.808 for researchers to reliably test their developments. We evaluated the results using P.808 on a blind test set. The results and the key learnings from the challenge are discussed.

The datasets and scripts are available for quick access at https://github.com/microsoft/DNS-Challenge</p></div>
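For orientation only, a minimal sketch of mixing a clean utterance with a noise clip at a target SNR, the kind of synthesis commonly used to build noisy training data from clean speech and noise corpora; it is an assumption for illustration, not the official scripts in the linked repository.

```python
# Minimal sketch: mix clean speech and noise at a target SNR (in dB).
import numpy as np

def mix_at_snr(clean, noise, snr_db):
    noise = np.resize(noise, clean.shape)                 # loop/crop the noise clip
    clean_power = np.mean(clean ** 2) + 1e-12
    noise_power = np.mean(noise ** 2) + 1e-12
    gain = np.sqrt(clean_power / (noise_power * 10 ** (snr_db / 10.0)))
    return clean + gain * noise

noisy = mix_at_snr(np.random.randn(16000), np.random.randn(8000), snr_db=5.0)
print(noisy.shape)   # (16000,)
```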
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^, [[Anton Batliner|AUTHOR Anton Batliner]]^^2^^, [[Christian Bergler|AUTHOR Christian Bergler]]^^3^^, [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]]^^4^^, [[Antonia Hamilton|AUTHOR Antonia Hamilton]]^^5^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^2^^, [[Alice Baird|AUTHOR Alice Baird]]^^2^^, [[Georgios Rizos|AUTHOR Georgios Rizos]]^^1^^, [[Maximilian Schmitt|AUTHOR Maximilian Schmitt]]^^2^^, [[Lukas Stappen|AUTHOR Lukas Stappen]]^^2^^, [[Harald Baumeister|AUTHOR Harald Baumeister]]^^4^^, [[Alexis Deighton MacIntyre|AUTHOR Alexis Deighton MacIntyre]]^^5^^, [[Simone Hantke|AUTHOR Simone Hantke]]^^6^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Imperial College London, UK; ^^2^^Universität Augsburg, Germany; ^^3^^FAU Erlangen-Nürnberg, Germany; ^^4^^Universität Ulm, Germany; ^^5^^University College London, UK; ^^6^^audEERING, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2042–2046&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2020 Computational Paralinguistics Challenge addresses three different problems for the first time in a research competition under well-defined conditions: In the //Elderly Emotion// Sub-Challenge, arousal and valence in the speech of elderly individuals have to be modelled as a 3-class problem; in the //Breathing// Sub-Challenge, breathing has to be assessed as a regression problem; and in the //Mask// Sub-Challenge, speech without and with a surgical mask has to be told apart. We describe the Sub-Challenges, baseline feature extraction, and classifiers based on the ‘usual’ COMPARE and BoAW features as well as deep unsupervised representation learning using the AUDEEP toolkit, and deep feature extraction from pre-trained CNNs using the DEEP SPECTRUM toolkit; in addition, we partially add deep end-to-end sequential modelling, and, for the first time in the challenge, linguistic analysis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeno Szep|AUTHOR Jeno Szep]], [[Salim Hariri|AUTHOR Salim Hariri]]
</p><p class="cpabstractcardaffiliationlist">University of Arizona, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2087–2091&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we address the ComParE 2020 Paralinguistics Mask sub-challenge, where the task is the detection of wearing surgical masks from short speech segments. In our approach, we propose a computer-vision-based pipeline to utilize the capabilities of deep convolutional neural network-based image classifiers developed in recent years and apply this technology to a specific class of spectrograms. Several linear and logarithmic scale spectrograms were tested, and the best performance is achieved on linear-scale, 3-Channel Spectrograms created from the audio segments. A single model image classifier provided a 6.1% better result than the best single-dataset baseline model. The ensemble of our models further improves accuracy and achieves 73.0% UAR by training just on the ‘train’ dataset and reaches 80.1% UAR on the test set when training includes the ‘devel’ dataset, which result is 8.3% higher than the baseline. We also provide an activation-mapping analysis to identify frequency ranges that are critical in the ‘mask’ versus ‘clear’ classification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziqing Yang|AUTHOR Ziqing Yang]], [[Zifan An|AUTHOR Zifan An]], [[Zehao Fan|AUTHOR Zehao Fan]], [[Chengye Jing|AUTHOR Chengye Jing]], [[Houwei Cao|AUTHOR Houwei Cao]]
</p><p class="cpabstractcardaffiliationlist">New York Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2092–2096&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we investigate various acoustic features and lexical features for the INTERSPEECH 2020 Computational Paralinguistic Challenge. For the acoustic analysis, we show that the proposed FV-MFCC feature is very promising, which has very strong prediction power on its own, and can also provide complementary information when fused with other acoustic features. For the lexical representation, we find that the corpus-dependent TF.IDF feature is by far the best representation. We also explore several model fusion techniques to combine different modalities together, and propose novel SVM models to aggregate the chunk-level predictions to the narrative-level predictions based on the chunk-level decision functionals. Finally we discuss the potential for improving prediction by combining the lexical and acoustic modalities together, and we find that fusion of lexical and acoustic modalities do not lead to consistent improvements over elderly Arousal, but substantially improve over the Valence. Our methods significantly outperform the official baselines on the test set in the participated Mask and Elderly Sub-challenges. We obtain an UAR of 75.1%, 54.3%, and 59.0% on the Mask, Elderly Arousal and Valence prediction tasks respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gizem Soğancıoğlu|AUTHOR Gizem Soğancıoğlu]]^^1^^, [[Oxana Verkholyak|AUTHOR Oxana Verkholyak]]^^2^^, [[Heysem Kaya|AUTHOR Heysem Kaya]]^^1^^, [[Dmitrii Fedotov|AUTHOR Dmitrii Fedotov]]^^3^^, [[Tobias Cadée|AUTHOR Tobias Cadée]]^^1^^, [[Albert Ali Salah|AUTHOR Albert Ali Salah]]^^1^^, [[Alexey Karpov|AUTHOR Alexey Karpov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universiteit Utrecht, The Netherlands; ^^2^^RAS, Russia; ^^3^^Universität Ulm, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2097–2101&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic and linguistic analysis for elderly emotion recognition is an under-studied and challenging research direction, but essential for the creation of digital assistants for the elderly, as well as unobtrusive telemonitoring of elderly in their residences for mental healthcare purposes. This paper presents our contribution to the INTERSPEECH 2020 Computational Paralinguistics Challenge (ComParE) - Elderly Emotion Sub-Challenge, which is comprised of two ternary classification tasks for arousal and valence recognition. We propose a bi-modal framework, where these tasks are modeled using state-of-the-art acoustic and linguistic features, respectively. In this study, we demonstrate that exploiting task-specific dictionaries and resources can boost the performance of linguistic models, when the amount of labeled data is small. Observing a high mismatch between development and test set performances of various models, we also propose alternative training and decision fusion strategies to better estimate and improve the generalization performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nicolae-Cătălin Ristea|AUTHOR Nicolae-Cătălin Ristea]]^^1^^, [[Radu Tudor Ionescu|AUTHOR Radu Tudor Ionescu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UPB, Romania; ^^2^^University of Bucharest, Romania</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2102–2106&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The task of detecting whether a person wears a face mask from speech is useful in modelling speech in forensic investigations, communication between surgeons or people protecting themselves against infectious diseases such as COVID-19. In this paper, we propose a novel data augmentation approach for mask detection from speech. Our approach is based on (i) training Generative Adversarial Networks (GANs) with cycle-consistency loss to translate unpaired utterances between two classes (with mask and without mask), and on (ii) generating new training utterances using the cycle-consistent GANs, assigning opposite labels to each translated utterance. Original and translated utterances are converted into spectrograms which are provided as input to a set of ResNet neural networks with various depths. The networks are combined into an ensemble through a Support Vector Machines (SVM) classifier. With this system, we participated in the Mask Sub-Challenge (MSC) of the INTERSPEECH 2020 Computational Paralinguistics Challenge, surpassing the baseline proposed by the organizers by 2.8%. Our data augmentation technique provided a performance boost of 0.9% on the private test set. Furthermore, we show that our data augmentation approach yields better results than other baseline and state-of-the-art augmentation methods. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomoya Koike|AUTHOR Tomoya Koike]]^^1^^, [[Kun Qian|AUTHOR Kun Qian]]^^1^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^2^^, [[Yoshiharu Yamamoto|AUTHOR Yoshiharu Yamamoto]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tokyo, Japan; ^^2^^Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2047–2051&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human hand-crafted features are always regarded as expensive, time-consuming, and difficult in almost all of the machine-learning-related tasks. First, those well-designed features extremely rely on human expert domain knowledge, which may restrain the collaboration work across fields. Second, the features extracted in such a brute-force scenario may not be easy to be transferred to another task, which means a series of new features should be designed. To this end, we introduce a method based on a transfer learning strategy combined with data augmentation techniques for the COMPARE 2020 Challenge //Mask// Sub-Challenge. Unlike the previous studies mainly based on pre-trained models by image data, we use a pre-trained model based on large scale audio data, i. e., AudioSet. In addition, the //SpecAugment// and //mixup// methods are used to improve the generalisation of the deep models. Experimental results demonstrate that the best-proposed model can significantly (p < .001, by one-tailed z-test) improve the unweighted average recall (UAR) from 71.8% (baseline) to 76.2% on the test set. Finally, the best result, i. e., 77.5% of the UAR on the test set, is achieved by a late fusion of the two best proposed models and the best single model in the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Steffen Illium|AUTHOR Steffen Illium]], [[Robert Müller|AUTHOR Robert Müller]], [[Andreas Sedlmeier|AUTHOR Andreas Sedlmeier]], [[Claudia Linnhoff-Popien|AUTHOR Claudia Linnhoff-Popien]]
</p><p class="cpabstractcardaffiliationlist">LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2052–2056&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In many fields of research, labeled data-sets are hard to acquire. This is where data augmentation promises to overcome the lack of training data in the context of neural network engineering and classification tasks. The idea here is to reduce model over-fitting to the feature distribution of a small under-descriptive training data-set. We try to evaluate such data augmentation techniques to gather insights in the performance boost they provide for several convolutional neural networks on mel-spectrogram representations of audio data. We show the impact of data augmentation on the binary classification task of surgical mask detection in samples of human voice (//ComParE Challenge 2020//). Also we consider four varying architectures to account for augmentation robustness. Results show that most of the baselines given by //ComParE// are outperformed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Philipp Klumpp|AUTHOR Philipp Klumpp]]^^1^^, [[Tomás Arias-Vergara|AUTHOR Tomás Arias-Vergara]]^^1^^, [[Juan Camilo Vásquez-Correa|AUTHOR Juan Camilo Vásquez-Correa]]^^1^^, [[Paula Andrea Pérez-Toro|AUTHOR Paula Andrea Pérez-Toro]]^^1^^, [[Florian Hönig|AUTHOR Florian Hönig]]^^1^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^1^^, [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^Universidad de Antioquia, Colombia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2057–2061&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To solve the task of surgical mask detection from audio recordings in the scope of Interspeech’s ComParE challenge, we introduce a phonetic recognizer which is able to differentiate between clear and mask samples.

A deep recurrent phoneme recognition model is first trained on spectrograms from a German corpus to learn the spectral properties of different speech sounds. Under the assumption that each phoneme sounds different in clear and mask speech, the model is then used to compute frame-wise phonetic labels for the challenge data, including information about the presence of a surgical mask. These labels serve to train a second phoneme recognition model, which is finally able to differentiate between mask and clear phoneme productions. For a single utterance, we can compute a functional representation and learn a random forest classifier to detect whether a speech sample was produced with or without a mask.

Our method performed better than the baseline methods on both the validation and test sets. Furthermore, we show how wearing a mask influences the speech signal. Certain phoneme groups were clearly affected by the obstruction in front of the vocal tract, while others remained almost unaffected.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Claude Montacié|AUTHOR Claude Montacié]], [[Marie-José Caraty|AUTHOR Marie-José Caraty]]
</p><p class="cpabstractcardaffiliationlist">STIH (EA 4509), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2062–2066&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2020 Compare Mask Sub-Challenge is to determine whether a speech signal was emitted with or without wearing a surgical mask. For this purpose, we have investigated phonetic context and intelligibility measurements related to speech changes caused by wearing a mask. Experiments were conducted on the Mask Augsburg Speech Corpus (MASC) and on the Mask Sorbonne Speech Corpus (MSSC) both in German language. We investigated the effects of mask wearing on the acoustical properties of phonemes at frame and segment levels. At the frame level, a phonetic mask detector has been developed to determine the most sensitive phonemes when wearing a mask. At the segmental level, a perceptual scoring of intelligibility has been developed and assessed on the MSCC. Two mask detector systems have been developed and assessed on the MASC: the first one used two large composite audio feature sets, the second one used a bottom-up approach based on phonetic analysis and frame clustering. Experiments have shown an improvement of 5.9% (absolute) on the Test set compared to the official baseline performance of the Challenge (71.8%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mariana Julião|AUTHOR Mariana Julião]], [[Alberto Abad|AUTHOR Alberto Abad]], [[Helena Moniz|AUTHOR Helena Moniz]]
</p><p class="cpabstractcardaffiliationlist">INESC-ID Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2067–2071&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the use of audio and text embeddings for the classification of emotion dimensions within the scope of the Elderly Emotion Sub-Challenge of the INTERSPEECH 2020 Computational Paralinguistics Challenge. We explore speaker and time dependencies on the expression of emotions through the combination of well-known acoustic-prosodic features and speaker embeddings extracted for different time scales. We consider text information input through transformer language embeddings, both isolated and in combination with acoustic features. The combination of acoustic and text information is explored in early and late fusion schemes. Overall, early fusion of systems trained on top of hand-crafted acoustic-prosodic features (eGeMAPS and ComParE), acoustic model feature embeddings (x-vectors), and text feature embeddings provide the best classification results in development for both Arousal and Valence. The combination of modalities allows us to reach a multi-dimension emotion classification performance in the development challenge data set of up to 48.8% Unweighted Average Recall (UAR) and 61.0% UAR for Arousal and Valence, respectively. These results correspond to a 16.2% and a 8.7% relative UAR improvement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maxim Markitantov|AUTHOR Maxim Markitantov]]^^1^^, [[Denis Dresvyanskiy|AUTHOR Denis Dresvyanskiy]]^^2^^, [[Danila Mamontov|AUTHOR Danila Mamontov]]^^2^^, [[Heysem Kaya|AUTHOR Heysem Kaya]]^^3^^, [[Wolfgang Minker|AUTHOR Wolfgang Minker]]^^2^^, [[Alexey Karpov|AUTHOR Alexey Karpov]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^RAS, Russia; ^^2^^Universität Ulm, Germany; ^^3^^Universiteit Utrecht, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2072–2076&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes deep learning approaches for the Mask and Breathing Sub-Challenges (SCs), which are addressed by the INTERSPEECH 2020 Computational Paralinguistics Challenge. Motivated by outstanding performance of state-of-the-art end-to-end (E2E) approaches, we explore and compare effectiveness of different deep Convolutional Neural Network (CNN) architectures on raw data, log Mel-spectrograms, and Mel-Frequency Cepstral Coefficients. We apply a transfer learning approach to improve model’s efficiency and convergence speed. In the Mask SC, we conduct experiments with several pretrained CNN architectures on log-Mel spectrograms, as well as Support Vector Machines on baseline features. For the Breathing SC, we propose an ensemble deep learning system that exploits E2E learning and sequence prediction. The E2E model is based on 1D CNN operating on raw speech signals and is coupled with Long Short-Term Memory layers for sequence modeling. The second model works with log-Mel features and is based on a pretrained 2D CNN model stacked to Gated Recurrent Unit layers. To increase performance of our models in both SCs, we use ensembles of the best deep neural models obtained from N-fold cross-validation on combined challenge training and development datasets. Our results markedly outperform the challenge test set baselines in both SCs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[John Mendonça|AUTHOR John Mendonça]], [[Francisco Teixeira|AUTHOR Francisco Teixeira]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]], [[Alberto Abad|AUTHOR Alberto Abad]]
</p><p class="cpabstractcardaffiliationlist">INESC-ID Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2077–2081&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents our contribution to the INTERSPEECH 2020 Breathing Sub-challenge. Besides fulfilling the main goal of the challenge, which involves the automatic prediction from conversational speech of the breath signals obtained from respiratory belts, we also analyse both original and predicted signals in an attempt to overcome the main pitfalls of the proposed systems. In particular, we identify the subsets of most irregular belt signals which yield the worst performance, measured by the Pearson correlation coefficient, and show how they affect the results that were obtained by both the baseline end-to-end system and variants such as a Bidirectional LSTM. The performance of this type of architecture indicates that future information is also relevant when predicting breathing patterns.

We also study the information retained from the AM-FM decomposition of the speech signal for this purpose, showing how the AM component significantly outperforms the FM component on all experiments, but fails to surpass the prediction results obtained using the original speech signal.

Finally, we validate the system’s performance in video-conferencing conditions by using data augmentation and compare clinically relevant parameters, such as breathing rate, from both the original belt signals and the ones predicted from the simulated video-conferencing signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexis Deighton MacIntyre|AUTHOR Alexis Deighton MacIntyre]]^^1^^, [[Georgios Rizos|AUTHOR Georgios Rizos]]^^2^^, [[Anton Batliner|AUTHOR Anton Batliner]]^^3^^, [[Alice Baird|AUTHOR Alice Baird]]^^3^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^3^^, [[Antonia Hamilton|AUTHOR Antonia Hamilton]]^^1^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University College London, UK; ^^2^^Imperial College London, UK; ^^3^^Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2082–2086&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modelling of the breath signal is of high interest to both healthcare professionals and computer scientists, as a source of diagnosis-related information, or a means for curating higher quality datasets in speech analysis research. The formation of a breath signal gold standard is, however, not a straightforward task, as it requires specialised equipment, human annotation budget, and even then, it corresponds to lab recording settings, that are not reproducible in-the-wild. Herein, we explore deep learning based methodologies, as an automatic way to predict a continuous-time breath signal by solely analysing spontaneous speech. We address two task formulations, those of continuous-valued signal prediction, as well as inhalation event prediction, that are of great use in various healthcare and Automatic Speech Recognition applications, and showcase results that outperform current baselines. Most importantly, we also perform an initial exploration into explaining which parts of the input audio signal are important with respect to the prediction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matej Martinc|AUTHOR Matej Martinc]], [[Senja Pollak|AUTHOR Senja Pollak]]
</p><p class="cpabstractcardaffiliationlist">JSI, Slovenia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2157–2161&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The paper describes a multimodal approach to the automated recognition of Alzheimer’s dementia in order to solve the ADReSS (Alzheimer’s Dementia Recognition through Spontaneous Speech) challenge at INTERSPEECH 2020. The proposed method exploits available audio and textual data from the benchmark speech dataset to address challenge’s two subtasks, a classification task that deals with classifying speech as dementia or healthy control speech and the regression task of determining the mini-mental state examination scores (MMSE) for each speech segment. Our approach is based on evaluating the predictive power of different types of features and on an exhaustive grid search across several feature combinations and different classification algorithms. Results suggest that even though TF-IDF based textual features generally lead to better classification and regression results, specific types of audio and readability features can boost the overall performance of the classification and regression models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Pompili|AUTHOR Anna Pompili]], [[Thomas Rolland|AUTHOR Thomas Rolland]], [[Alberto Abad|AUTHOR Alberto Abad]]
</p><p class="cpabstractcardaffiliationlist">INESC-ID Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2202–2206&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a multi-modal approach for the automatic detection of Alzheimer’s disease proposed in the context of the INESC-ID Human Language Technology Laboratory participation in the ADReSS 2020 challenge. Our classification framework takes advantage of both acoustic and textual feature embeddings, which are extracted independently and later combined. Speech signals are encoded into acoustic features using DNN speaker embeddings extracted from pre-trained models. For textual input, contextual embedding vectors are first extracted using an English Bert model and then used either to directly compute sentence embeddings or to feed a bidirectional LSTM-RNNs with attention. Finally, an SVM classifier with linear kernel is used for the individual evaluation of the three systems. Our best system, based on the combination of linguistic and acoustic information, attained a classification accuracy of 81.25%. Results have shown the importance of linguistic features in the classification of Alzheimer’s Disease, which outperforms the acoustic ones in terms of accuracy. Early stage features fusion did not provide additional improvements, confirming that the discriminant ability conveyed by speech in this case is smooth out by linguistic data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shahla Farzana|AUTHOR Shahla Farzana]], [[Natalie Parde|AUTHOR Natalie Parde]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Chicago, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2207–2211&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Mini Mental State Examination (MMSE) is a standardized cognitive health screening test. It is generally administered by trained clinicians, which may be time-consuming and costly. An intriguing and scalable alternative is to detect changes in cognitive function by automatically monitoring individuals’ memory and language abilities from their conversational narratives. We work towards doing so by predicting clinical MMSE scores using verbal and non-verbal features extracted from the transcripts of 108 speech samples from the ADReSS Challenge dataset. We achieve a Root Mean Squared Error (RMSE) of 4.34, a percentage decrease of 29.3% over the existing performance benchmark. We also explore the performance impacts of acoustic versus linguistic, text-based features and find that linguistic features achieve lower RMSE scores, providing strong positive support for their inclusion in future MMSE score prediction models. Our best-performing model leverages a selection of verbal and non-verbal cues, demonstrating that MMSE score prediction is a rich problem that is best addressed using input from multiple perspectives.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Utkarsh Sarawgi|AUTHOR Utkarsh Sarawgi]], [[Wazeer Zulfikar|AUTHOR Wazeer Zulfikar]], [[Nouran Soliman|AUTHOR Nouran Soliman]], [[Pattie Maes|AUTHOR Pattie Maes]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2212–2216&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Alzheimer’s disease is estimated to affect around 50 million people worldwide and is rising rapidly, with a global economic burden of nearly a trillion dollars. This calls for scalable, cost-effective, and robust methods for detection of Alzheimer’s dementia (AD). We present a novel architecture that leverages acoustic, cognitive, and linguistic features to form a multimodal ensemble system. It uses specialized artificial neural networks with temporal characteristics to detect AD and its severity, which is reflected through Mini-Mental State Exam (MMSE) scores. We first evaluate it on the ADReSS challenge dataset, which is a subject-independent and balanced dataset matched for age and gender to mitigate biases, and is available through DementiaBank. Our system achieves state-of-the-art test accuracy, precision, recall, and F1-score of 83.3% each for AD classification, and state-of-the-art test root mean squared error (RMSE) of 4.60 for MMSE score regression. To the best of our knowledge, the system further achieves state-of-the-art AD classification accuracy of 88.0% when evaluated on the full benchmark DementiaBank Pitt database. Our work highlights the applicability and transferability of spontaneous speech to produce a robust inductive transfer learning model, and demonstrates generalizability through a task-agnostic feature-space. The source code is available at https://github.com/wazeerzulfikar/alzheimers-dementia</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Junghyun Koo|AUTHOR Junghyun Koo]], [[Jie Hwan Lee|AUTHOR Jie Hwan Lee]], [[Jaewoo Pyo|AUTHOR Jaewoo Pyo]], [[Yujin Jo|AUTHOR Yujin Jo]], [[Kyogu Lee|AUTHOR Kyogu Lee]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2217–2221&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Collecting and accessing a large amount of medical data is very time-consuming and laborious, not only because it is difficult to find specific patients but also because it is required to resolve the confidentiality of a patient’s medical records. On the other hand, there are deep learning models, trained on easily collectible, large scale datasets such as Youtube or Wikipedia, offering useful representations. It could therefore be very advantageous to utilize the features from these pre-trained networks for handling a small amount of data at hand. In this work, we exploit various multi-modal features extracted from pre-trained networks to recognize Alzheimer’s Dementia using a neural network, with a small dataset provided by the ADReSS Challenge at INTERSPEECH 2020. The challenge regards to discern patients suspicious of Alzheimer’s Dementia by providing acoustic and textual data. With the multi-modal features, we modify a Convolutional Recurrent Neural Network based structure to perform classification and regression tasks simultaneously and is capable of computing conversations with variable lengths. Our test results surpass baseline’s accuracy by 18.75%, and our validation result for the regression task shows the possibility of classifying 4 classes of cognitive impairment with an accuracy of 78.70%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Muhammad Shehram Shah Syed|AUTHOR Muhammad Shehram Shah Syed]]^^1^^, [[Zafi Sherhan Syed|AUTHOR Zafi Sherhan Syed]]^^2^^, [[Margaret Lech|AUTHOR Margaret Lech]]^^1^^, [[Elena Pirogova|AUTHOR Elena Pirogova]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^RMIT University, Australia; ^^2^^MUET, Pakistan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2222–2226&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Dementia is a neurodegenerative disease that leads to cognitive and (eventually) physical impairments. Individuals who are affected by dementia experience deterioration in their capacity to perform day-to-day tasks thereby significantly affecting their quality of life. This paper addresses the Interspeech 2020 Alzheimer’s Dementia Recognition through Spontaneous Speech (ADReSS) challenge where the objective is to propose methods for two tasks. The first task is to identify speech recordings from individuals with dementia amongst a set of recordings which also include those from healthy individuals. The second task requires participants to estimate the Mini-Mental State Examination (MMSE) score based on an individual’s speech alone. To this end, we investigated characteristics of speech paralinguistics such as prosody, voice quality, and spectra as well as VGGish based deep acoustic embedding for automated screening for dementia based on the audio modality. In addition to this, we also computed deep text embeddings for transcripts of speech. For the classification task, our method achieves an accuracy of 85.42% compared to the baseline of 62.50% on the test partition, meanwhile, for the regression task, our method achieves an RMSE = 4.30 compared to the baseline of 6.14. These results show the promise of our proposed methods for the task of automated screening for dementia based on speech alone.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiahong Yuan|AUTHOR Jiahong Yuan]]^^1^^, [[Yuchen Bian|AUTHOR Yuchen Bian]]^^1^^, [[Xingyu Cai|AUTHOR Xingyu Cai]]^^1^^, [[Jiaji Huang|AUTHOR Jiaji Huang]]^^1^^, [[Zheng Ye|AUTHOR Zheng Ye]]^^2^^, [[Kenneth Church|AUTHOR Kenneth Church]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Baidu, USA; ^^2^^CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2162–2166&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Disfluencies and language problems in Alzheimer’s Disease can be naturally modeled by fine-tuning Transformer-based pre-trained language models such as BERT and ERNIE. Using this method, we achieved 89.6% accuracy on the test set of the ADReSS (__A__lzheimer’s __D__ementia __R__ecognition through __S__pontaneous __S__peech) Challenge, a considerable improvement over the baseline of 75.0%, established by the organizers of the challenge. The best accuracy was obtained with ERNIE, plus an encoding of pauses. Robustness is a challenge for large models and small training sets. Ensemble over many runs of BERT/ERNIE fine-tuning reduced variance and improved accuracy. We found that //um// was used much less frequently in Alzheimer’s speech, compared to //uh//. We discussed this interesting finding from linguistic and cognitive perspectives.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aparna Balagopalan|AUTHOR Aparna Balagopalan]]^^1^^, [[Benjamin Eyre|AUTHOR Benjamin Eyre]]^^1^^, [[Frank Rudzicz|AUTHOR Frank Rudzicz]]^^2^^, [[Jekaterina Novikova|AUTHOR Jekaterina Novikova]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Winterlight Labs, Canada; ^^2^^University of Toronto, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2167–2171&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2020/MEDIA/2557" class="externallinkbutton" target="_blank">{{$:/causal/ZIP Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Research related to automatically detecting Alzheimer’s disease (AD) is important, given the high prevalence of AD and the high cost of traditional methods. Since AD significantly affects the content and acoustics of spontaneous speech, natural language processing and machine learning provide promising techniques for reliably detecting AD. We compare and contrast the performance of two such approaches for AD detection on the recent ADReSS challenge dataset [1]: 1) using domain knowledge-based hand-crafted features that capture linguistic and acoustic phenomena, and 2) fine-tuning Bidirectional Encoder Representations from Transformer (BERT)-based sequence classification models. We also compare multiple feature-based regression models for a neuropsychological score task in the challenge. We observe that fine-tuned BERT models, given the relative importance of linguistics in cognitive impairment detection, outperform feature-based approaches on the AD detection task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saturnino Luz|AUTHOR Saturnino Luz]]^^1^^, [[Fasih Haider|AUTHOR Fasih Haider]]^^1^^, [[Sofia de la Fuente|AUTHOR Sofia de la Fuente]]^^1^^, [[Davida Fromm|AUTHOR Davida Fromm]]^^2^^, [[Brian MacWhinney|AUTHOR Brian MacWhinney]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2172–2176&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ADReSS Challenge at INTERSPEECH 2020 defines a shared task through which different approaches to the automated recognition of Alzheimer’s dementia based on spontaneous speech can be compared. ADReSS provides researchers with a benchmark speech dataset which has been acoustically pre-processed and balanced in terms of age and gender, defining two cognitive assessment tasks, namely: the Alzheimer’s speech classification task and the neuropsychological score regression task. In the Alzheimer’s speech classification task, ADReSS challenge participants create models for classifying speech as dementia or healthy control speech. In the neuropsychological score regression task, participants create models to predict mini-mental state examination scores. This paper describes the ADReSS Challenge in detail and presents a baseline for both tasks, including feature extraction procedures and results for classification and regression models. ADReSS aims to provide the speech and language Alzheimer’s research community with a platform for comprehensive methodological comparisons. This will hopefully contribute to addressing the lack of standardisation that currently affects the field and shed light on avenues for future research and clinical applicability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Raghavendra Pappagari|AUTHOR Raghavendra Pappagari]], [[Jaejin Cho|AUTHOR Jaejin Cho]], [[Laureano Moro-Velázquez|AUTHOR Laureano Moro-Velázquez]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2177–2181&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we analyze the use of state-of-the-art technologies for speaker recognition and natural language processing to detect Alzheimer’s Disease (AD) and to assess its severity predicting Mini-mental status evaluation (MMSE) scores. With these purposes, we study the use of speech signals and transcriptions. Our work focuses on the adaptation of state-of-the-art models for both modalities individually and together to examine its complementarity. We used x-vectors to characterize speech signals and pre-trained BERT models to process human transcriptions with different back-ends in AD diagnosis and assessment. We evaluated features based on silence segments of the audio files as a complement to x-vectors. We trained and evaluated our systems in the Interspeech 2020 ADReSS challenge dataset, containing 78 AD patients and 78 sex and age-matched controls. Our results indicate that the fusion of scores obtained from the acoustic and the transcript-based models provides the best detection and assessment results, suggesting that individual models for two modalities contain complementary information. The addition of the silence-related features improved the fusion system even further. A separate analysis of the models suggests that transcript-based models provide better results than acoustic models in the detection task but similar results in the MMSE prediction task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^1^^, [[Yilin Pan|AUTHOR Yilin Pan]]^^2^^, [[Zhao Ren|AUTHOR Zhao Ren]]^^1^^, [[Julian Fritsch|AUTHOR Julian Fritsch]]^^3^^, [[Venkata Srikanth Nallanthighal|AUTHOR Venkata Srikanth Nallanthighal]]^^4^^, [[Heidi Christensen|AUTHOR Heidi Christensen]]^^2^^, [[Daniel Blackburn|AUTHOR Daniel Blackburn]]^^2^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^5^^, [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]]^^3^^, [[Helmer Strik|AUTHOR Helmer Strik]]^^6^^, [[Aki Härmä|AUTHOR Aki Härmä]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^University of Sheffield, UK; ^^3^^Idiap Research Institute, Switzerland; ^^4^^Philips, The Netherlands; ^^5^^Universität Augsburg, Germany; ^^6^^Radboud Universiteit, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2182–2186&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the light of the current COVID-19 pandemic, the need for remote digital health assessment tools is greater than ever. This statement is especially pertinent for elderly and vulnerable populations. In this regard, the INTERSPEECH 2020 Alzheimer’s Dementia Recognition through Spontaneous Speech (ADReSS) Challenge offers competitors the opportunity to develop speech and language-based systems for the task of Alzheimer’s Dementia (AD) recognition. The challenge data consists of speech recordings and their transcripts, the work presented herein is an assessment of different contemporary approaches on these modalities. Specifically, we compared a hierarchical neural network with an attention mechanism trained on linguistic features with three acoustic-based systems: (i) Bag-of-Audio-Words (BoAW) quantising different low-level descriptors, (ii) a Siamese Network trained on log-Mel spectrograms, and (iii) a Convolutional Neural Network (CNN) end-to-end system trained on raw waveforms. Key results indicate the strength of the linguistic approach over the acoustics systems. Our strongest test-set result was achieved using a late fusion combination of BoAW, End-to-End CNN, and hierarchical-attention networks, which outperformed the challenge baseline in both the classification and regression tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Morteza Rohanian|AUTHOR Morteza Rohanian]], [[Julian Hough|AUTHOR Julian Hough]], [[Matthew Purver|AUTHOR Matthew Purver]]
</p><p class="cpabstractcardaffiliationlist">Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2187–2191&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a submission to the Alzheimer’s Dementia Recognition through Spontaneous Speech (ADReSS) challenge, which aims to develop methods that can assist in the automated prediction of severity of Alzheimer’s Disease from speech data. We focus on acoustic and natural language features for cognitive impairment detection in spontaneous speech in the context of Alzheimer’s Disease Diagnosis and the mini-mental state examination (MMSE) score prediction. We proposed a model that obtains unimodal decisions from different LSTMs, one for each modality of text and audio, and then combines them using a gating mechanism for the final prediction. We focused on sequential modelling of text and audio and investigated whether the disfluencies present in individuals’ speech relate to the extent of their cognitive impairment. Our results show that the proposed classification and regression schemes obtain very promising results on both development and test sets. This suggests Alzheimer’s Disease can be detected successfully with sequence modeling of the speech data of medical sessions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thomas Searle|AUTHOR Thomas Searle]], [[Zina Ibrahim|AUTHOR Zina Ibrahim]], [[Richard Dobson|AUTHOR Richard Dobson]]
</p><p class="cpabstractcardaffiliationlist">King’s College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2192–2196&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Alzheimer’s Dementia (AD) is an incurable, debilitating, and progressive neurodegenerative condition that affects cognitive function. Early diagnosis is important as therapeutics can delay progression and give those diagnosed vital time. Developing models that analyse spontaneous speech could eventually provide an efficient diagnostic modality for earlier diagnosis of AD. The Alzheimer’s Dementia Recognition through Spontaneous Speech task offers acoustically pre-processed and balanced datasets for the classification and prediction of AD and associated phenotypes through the modelling of spontaneous speech. We exclusively analyse the supplied textual transcripts of the spontaneous speech dataset, building and comparing performance across numerous models for the classification of AD vs controls and the prediction of Mental Mini State Exam scores. We rigorously train and evaluate Support Vector Machines (SVMs), Gradient Boosting Decision Trees (GBDT), and Conditional Random Fields (CRFs) alongside deep learning Transformer based models. We find our top performing models to be a simple Term Frequency-Inverse Document Frequency (TF-IDF) vectoriser as input into a SVM model and a pre-trained Transformer based model ‘DistilBERT’ when used as an embedding layer into simple linear models. We demonstrate test set scores of 0.81–0.82 across classification metrics and a RMSE of 4.58.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erik Edwards|AUTHOR Erik Edwards]], [[Charles Dognin|AUTHOR Charles Dognin]], [[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]], [[Maneesh Singh|AUTHOR Maneesh Singh]]
</p><p class="cpabstractcardaffiliationlist">Verisk Analytics, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2197–2201&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the Verisk submission to The ADReSS Challenge [1]. We analyze the text data at both the word level and phoneme level, which leads to our best-performing system in combination with audio features. Thus, the system is both multi-modal (audio and text) and multi-scale (word and phoneme levels). Experiments with larger neural language models did not result in improvement, given the small amount of text data available. By contrast, the phoneme representation has a vocabulary size of only 66 tokens and could be trained from scratch on the present data. Therefore, we believe this method to be useful in cases of limited text data, as in many medical settings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jens Heitkaemper|AUTHOR Jens Heitkaemper]], [[Joerg Schmalenstroeer|AUTHOR Joerg Schmalenstroeer]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]
</p><p class="cpabstractcardaffiliationlist">Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2597–2601&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech activity detection (SAD), which often rests on the fact that the noise is “more” stationary than speech, is particularly challenging in non-stationary environments, because the time variance of the acoustic scene makes it difficult to discriminate speech from noise. We propose two approaches to SAD, where one is based on statistical signal processing, while the other utilizes neural networks. The former employs sophisticated signal processing to track the noise and speech energies and is meant to support the case for a resource efficient, unsupervised signal processing approach. The latter introduces a recurrent network layer that operates on short segments of the input speech to do temporal smoothing in the presence of non-stationary noise. The systems are tested on the Fearless Steps challenge database, which consists of the transmission data from the Apollo-11 space mission. The statistical SAD achieves comparable detection performance to earlier proposed neural network based SADs, while the neural network based approach leads to a decision cost function of 1.07% on the evaluation set of the 2020 Fearless Steps Challenge, which sets a new state of the art.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xueshuai Zhang|AUTHOR Xueshuai Zhang]], [[Wenchao Wang|AUTHOR Wenchao Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]]
</p><p class="cpabstractcardaffiliationlist">CAS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2602–2606&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the ASRGroup team speaker diarization systems submitted to the TRACK 2 of the Fearless Steps Challenge Phase-2. In this system, the similarity matrix among all segments of an audio recording was measured by Sequential Bidirectional Long Short-term Memory Networks (Bi-LSTM), and a clustering scheme based on Density Peak Cluster Algorithm (DPCA) was proposed to clustering the segments. The system was compared with the Kaldi Toolkit diarization system (x-vector based on TDNN with PLDA scoring model) and the Spectral system (similarity based on Bi-LSTM with Spectral clustering algorithm). Experiments show that our system is significantly outperforms above systems and achieves a Diarization Error Rate (DER) of 42.75% and 39.52% respectively on the Dev dataset and Eval dataset of TRACK 2 (Fearless Steps Challenge Phase-2). Compared with the baseline Kaldi Toolkit diarization system and Spectral Clustering algorithm with Bi-LSTM similarity models, the DER of our system is absolutely reduced 4.64%, 1.84% and 8.85%, 7.57% respectively on the two datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qingjian Lin|AUTHOR Qingjian Lin]], [[Tingle Li|AUTHOR Tingle Li]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2607–2611&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the systems developed by the DKU team for the Fearless Steps Challenge Phase-02 competition. For the Speech Activity Detection task, we start with the Long Short-Term Memory (LSTM) system and then apply the ResNet-LSTM improvement. Our ResNet-LSTM system reduces the DCF error by about 38% relatively in comparison with the LSTM baseline. We also discuss the system performance with additional training corpora included, and the lowest DCF of 1.406% on the Eval Set is gained with system pre-training. As for the Speaker Identification task, we employ the Deep ResNet vector system, which receives a variable-length feature sequence and directly generates speaker posteriors. The pretraining process with Voxceleb is also considered, and our best-performing system achieves the Top-5 accuracy of 92.393% on the Eval Set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Arseniy Gorin|AUTHOR Arseniy Gorin]], [[Daniil Kulko|AUTHOR Daniil Kulko]], [[Steven Grima|AUTHOR Steven Grima]], [[Alex Glasman|AUTHOR Alex Glasman]]
</p><p class="cpabstractcardaffiliationlist">Behavox, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2612–2616&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe the speech activity detection (SAD), speaker diarization (SD), and automatic speech recognition (ASR) experiments conducted by the Behavox team for the Interspeech 2020 Fearless Steps Challenge (FSC-2). A relatively small amount of labeled data, a large variety of speakers and channel distortions, specific lexicon and speaking style resulted in high error rates on the systems which involved this data. In addition to approximately 36 hours of annotated NASA mission recordings, the organizers provided a much larger but unlabeled 19k hour Apollo-11 corpus that we also explore for semi-supervised training of ASR acoustic and language models, observing more than 17% relative word error rate improvement compared to training on the FSC-2 data only. We also compare several SAD and SD systems to approach the most difficult tracks of the challenge (track 1 for diarization and ASR), where long 30-minute audio recordings are provided for evaluation without segmentation or speaker information. For all systems, we report substantial performance improvements compared to the FSC-2 baseline systems, and achieved a first-place ranking for SD and ASR and fourth-place for SAD in the challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aditya Joglekar|AUTHOR Aditya Joglekar]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Meena Chandra Shekar|AUTHOR Meena Chandra Shekar]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2617–2621&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Fearless Steps Initiative by UTDallas-CRSS led to the digitization, recovery, and diarization of 19,000 hours of original analog audio data, as well as the development of algorithms to extract meaningful information from this multi-channel naturalistic data resource. The 2020 FEARLESS STEPS (FS-2) Challenge is the second annual challenge held for the Speech and Language Technology community to motivate supervised learning algorithm development for multi-party and multi-stream naturalistic audio. In this paper, we present an overview of the challenge sub-tasks, data, performance metrics, and lessons learned from Phase-2 of the Fearless Steps Challenge (FS-2). We present advancements made in FS-2 through extensive community outreach and feedback. We describe innovations in the challenge corpus development, and present revised baseline results. We finally discuss the challenge outcome and general trends in system development across both phases (Phase FS-1 Unsupervised, and Phase FS-2 Supervised) of the challenge, and its continuation into multi-channel challenge tasks for the upcoming Fearless Steps Challenge Phase-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivan Halim Parmonangan|AUTHOR Ivan Halim Parmonangan]], [[Hiroki Tanaka|AUTHOR Hiroki Tanaka]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2762–2766&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Since the perceived audio quality of the synthesized speech may determine a system’s market success, quality evaluations are critical. Audio quality evaluations are usually done in either subjectively or objectively. Due to their costly and time-consuming nature, the subjective approaches have generally been replaced by the faster, more cost-efficient objective approaches. The primary downside of the objective approaches primarily is that they lack the human influence factors which are crucial for deriving the subjective perception of quality. However, it cannot be observed directly and manifested in individual brain activity. Thus, we combined predictions from single-subject electroencephalograph (EEG) information and audio features to improve the predictions of the overall quality of synthesized speech. Our result shows that by combining the results from both audio and EEG models, a very simple neural network can surpass the performance of the single-modal approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rini A. Sharon|AUTHOR Rini A. Sharon]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]
</p><p class="cpabstractcardaffiliationlist">IIT Madras, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2767–2771&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech cognition bears potential application as a brain computer interface that can improve the quality of life for the otherwise communication impaired people. While speech and resting state EEG are popularly studied, here we attempt to explore a “non-speech” (NS) state of brain activity corresponding to the silence regions of speech audio. Firstly, speech perception is studied to inspect the existence of such a state, followed by its identification in speech imagination. Analogous to how voice activity detection is employed to enhance the performance of speech recognition, the EEG state activity detection protocol implemented here is applied to boost the confidence of imagined speech EEG decoding. Classification of speech and NS state is done using two datasets collected from laboratory-based and commercial-based devices. The state sequential information thus obtained is further utilized to reduce the search space of imagined EEG unit recognition. Temporal signal structures and topographic maps of NS states are visualized across subjects and sessions. The recognition performance and the visual distinction observed demonstrates the existence of silence signatures in EEG.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siqi Cai|AUTHOR Siqi Cai]]^^1^^, [[Enze Su|AUTHOR Enze Su]]^^1^^, [[Yonghao Song|AUTHOR Yonghao Song]]^^1^^, [[Longhan Xie|AUTHOR Longhan Xie]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SCUT, China; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2772–2776&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A listener listens to one speech stream at a time in a multi-speaker scenario. EEG-based auditory attention detection (AAD) aims to identify to which speech stream the listener has attended using EEG signals. The performance of linear modeling approaches is limited due to the non-linear nature of the human auditory perception. Furthermore, the real-world applications call for low latency AAD solutions in noisy environments. In this paper, we propose to adopt common spatial pattern (CSP) analysis to enhance the discriminative ability of EEG signals. We study the use of convolutional neural network (CNN) as the non-linear solution. The experiments show that it is possible to decode auditory attention within 2 seconds, with a competitive accuracy of 80.2%, even in noisy acoustic environments. The results are encouraging for brain-computer interfaces, such as hearing aids, which require real-time responses, and robust AAD in complex acoustic environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Miguel Angrick|AUTHOR Miguel Angrick]]^^1^^, [[Christian Herff|AUTHOR Christian Herff]]^^2^^, [[Garett Johnson|AUTHOR Garett Johnson]]^^3^^, [[Jerry Shih|AUTHOR Jerry Shih]]^^4^^, [[Dean Krusienski|AUTHOR Dean Krusienski]]^^5^^, [[Tanja Schultz|AUTHOR Tanja Schultz]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Bremen, Germany; ^^2^^Universiteit Maastricht, The Netherlands; ^^3^^Old Dominion University, USA; ^^4^^UC San Diego Health, USA; ^^5^^Virginia Commonwealth University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2777–2781&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Direct synthesis from intracranial brain activity into acoustic speech might provide an intuitive and natural communication means for speech-impaired users. In previous studies we have used logarithmic Mel-scaled speech spectrograms (logMels) as an intermediate representation in the decoding from ElectroCorticoGraphic (ECoG) recordings to an audible waveform. Mel-scaled speech spectrograms have a long tradition in acoustic speech processing and speech synthesis applications. In the past, we relied on regression approaches to find a mapping from brain activity to logMel spectral coefficients, due to the continuous feature space. However, regression tasks are unbounded and thus neuronal fluctuations in brain activity may result in abnormally high amplitudes in a synthesized acoustic speech signal. To mitigate these issues, we propose two methods for quantization of power values to discretize the feature space of logarithmic Mel-scaled spectral coefficients by using the median and the logistic formula, respectively, to reduce the complexity and restricting the number of intervals. We evaluate the practicability in a proof-of-concept with one participant through a simple classification based on linear discriminant analysis and compare the resulting waveform with the original speech. Reconstructed spectrograms achieve Pearson correlation coefficients with a mean of r=0.5 ± 0.11 in a 5-fold cross validation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Debadatta Dash|AUTHOR Debadatta Dash]]^^1^^, [[Paul Ferrari|AUTHOR Paul Ferrari]]^^2^^, [[Angel Hernandez|AUTHOR Angel Hernandez]]^^3^^, [[Daragh Heitzman|AUTHOR Daragh Heitzman]]^^4^^, [[Sara G. Austin|AUTHOR Sara G. Austin]]^^1^^, [[Jun Wang|AUTHOR Jun Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Austin, USA; ^^2^^Dell Children’s Medical Center, USA; ^^3^^Helen DeVos Children’s Hospital, USA; ^^4^^Texas Neurology, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2782–2786&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2020/MEDIA/3071" class="externallinkbutton" target="_blank">{{$:/causal/ZIP Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Amyotrophic lateral sclerosis (ALS) is a motor neuron disease that may cause locked-in syndrome (completely paralyzed but aware). These locked-in patients can communicate with brain-computer interfaces (BCI), e.g. EEG spellers, which have a low communication rate. Recent research has progressed towards neural speech decoding paradigms that have the potential for normal communication rates. Yet, current neural decoding research is limited to typical speakers and the extent to which these studies can be translated to a target population (e.g., ALS) is still unexplored. Here, we investigated the decoding of imagined and spoken phrases from non-invasive magnetoencephalography (MEG) signals of ALS subjects using several spectral features (band-power of brainwaves: delta, theta, alpha, beta, and gamma frequencies) with seven machine learning decoders (Naive Bayes, K-nearest neighbor, decision tree, ensemble, support vector machine, linear discriminant analysis, and artificial neural network). Experimental results indicated that the decoding performance for ALS patients is lower than healthy subjects yet significantly higher than chance level. The best performances were 75% for decoding five imagined phrases and 88% for five spoken phrases from ALS patients. To our knowledge, this is the first demonstration of neural speech decoding for a speech disordered population.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoyi Qin|AUTHOR Xiaoyi Qin]]^^1^^, [[Ming Li|AUTHOR Ming Li]]^^2^^, [[Hui Bu|AUTHOR Hui Bu]]^^3^^, [[Wei Rao|AUTHOR Wei Rao]]^^4^^, [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^4^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^5^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Duke Kunshan University, China; ^^2^^Duke Kunshan University, China; ^^3^^AISHELL Foundation, China; ^^4^^NUS, Singapore; ^^5^^University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3456–3460&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2020 Far-Field Speaker Verification Challenge (FFSVC 2020) addresses three different research problems under well-defined conditions: far-field text-dependent speaker verification from single microphone array, far-field text-independent speaker verification from single microphone array, and far-field text-dependent speaker verification from distributed microphone arrays. All three tasks pose a cross-channel challenge to the participants. To simulate the real-life scenario, the enrollment utterances are recorded from close-talk cellphone, while the test utterances are recorded from the far-field microphone arrays. In this paper, we describe the database, the challenge, and the baseline system, which is based on a ResNet-based deep speaker network with cosine similarity scoring. For a given utterance, the speaker embeddings of different channels are equally averaged as the final embedding. The baseline system achieves minDCFs of 0.62, 0.66, and 0.64 and EERs of 6.27%, 6.55%, and 7.18% for task 1, task 2, and task 3, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peng Zhang|AUTHOR Peng Zhang]]^^1^^, [[Peng Hu|AUTHOR Peng Hu]]^^2^^, [[Xueliang Zhang|AUTHOR Xueliang Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inner Mongolia University, China; ^^2^^Elevoc Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3461–3465&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present an effective deep embedding learning architecture for speaker verification task. Compared with the widely used residual neural network (ResNet) and time-delay neural network (TDNN) based architectures, two main improvements are proposed: 1) We use densely connected convolutional network (DenseNet) to encode the short term context information of the speaker. 2) A bidirectional attentive pooling strategy is proposed to further model the long term temporal context and aggregate the important frames which reflect the speaker identity. We evaluate the proposed architecture on the task of text-dependent speaker verification in the Interspeech 2020 Far Field Speaker Verification Challenge (FFSVC2020). Result shows that the proposed algorithm outperforms the official baseline of FFSVC2020 with 8.06%, 19.70% minDCFs and 9.26%, 16.16% EERs relative reductions on the evaluation set of Task 1 and Task 3 respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aleksei Gusev|AUTHOR Aleksei Gusev]]^^1^^, [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]]^^2^^, [[Alisa Vinogradova|AUTHOR Alisa Vinogradova]]^^1^^, [[Tseren Andzhukaev|AUTHOR Tseren Andzhukaev]]^^2^^, [[Andrey Shulipa|AUTHOR Andrey Shulipa]]^^1^^, [[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^1^^, [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]]^^2^^, [[Alexander Kozlov|AUTHOR Alexander Kozlov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ITMO University, Russia; ^^2^^STC-innovations, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3466–3470&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents speaker recognition (SR) systems submitted by the Speech Technology Center (STC) team to the Far-Field Speaker Verification Challenge 2020. SR tasks of the challenge are focused on the problem of far-field text-dependent speaker verification from single microphone array (Track 1), far-field text-independent speaker verification from single microphone array (Track 2) and far-field text-dependent speaker verification from distributed microphone arrays (Track 3).

In this paper, we present the techniques and ideas underlying our best performing models. A number of experiments on x-vector-based and ResNet-like architectures show that ResNet-based networks outperform x-vector-based systems. The submitted systems are fusions of ResNet34-based extractors, trained on 80 log Mel-filter bank energies (MFBs) post-processed with a U-net-like voice activity detector (VAD). The best systems for Track 1, Track 2 and Track 3 achieved 5.08% EER and 0.500 C^^min^^,,det,,, 5.39% EER and 0.541 C^^min^^,,det,,, and 5.53% EER and 0.458 C^^min^^,,det,, on the challenge evaluation sets, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li Zhang|AUTHOR Li Zhang]], [[Jian Wu|AUTHOR Jian Wu]], [[Lei Xie|AUTHOR Lei Xie]]
</p><p class="cpabstractcardaffiliationlist">Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3471–3475&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the NPU system submitted to Interspeech 2020 Far-Field Speaker Verification Challenge (FFSVC). We particularly focus on far-field text-dependent SV from single (task1) and multiple microphone arrays (task3). The major challenges in such scenarios are //short utterance// and //cross-channel and distance mismatch// for enrollment and test. With the belief that better speaker embedding can alleviate the effects from short utterance, we introduce a new speaker embedding architecture — ResNet-BAM, which integrates a bottleneck attention module with ResNet as a simple and efficient way to further improve representation power of ResNet. This contribution brings up to 1% EER reduction. We further address the mismatch problem in three directions. First, //domain adversarial training//, which aims to learn domain-invariant features, can yield to 0.8% EER reduction. Second, //front-end signal processing//, including WPE and beamforming, has no obvious contribution, but together with data selection and domain adversarial training, can further contribute to 0.5% EER reduction. Finally, data augmentation, which works with a specifically-designed data selection strategy, can lead to 2% EER reduction. Together with the above contributions, in the middle challenge results, our single submission system (without multi-system fusion) achieves the first and second place on task 1 and task 3, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ying Tong|AUTHOR Ying Tong]], [[Wei Xue|AUTHOR Wei Xue]], [[Shanluo Huang|AUTHOR Shanluo Huang]], [[Lu Fan|AUTHOR Lu Fan]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Xiaodong He|AUTHOR Xiaodong He]]
</p><p class="cpabstractcardaffiliationlist">JD.com, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3476–3480&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the development of our systems for the Interspeech 2020 Far-Field Speaker Verification Challenge (FFSVC). Our focus is the task 2 of the challenge, which is to perform far-field text-independent speaker verification using a single microphone array. The FFSVC training set provided by the challenge is augmented by pre-processing the far-field data with both beamforming, voice channel switching, and a combination of weighted prediction error (WPE) and beamforming. Two open-access corpora, CHData in Mandarin and VoxCeleb2 in English, are augmented using multiple methods and mixed with the augmented FFSVC data to form the final training data. Four different model structures are used to model speaker characteristics: ResNet, extended time-delay neural network (ETDNN), Transformer, and factorized TDNN (FTDNN), whose output values are pooled across time using the self-attentive structure, the statistic pooling structure, and the GVLAD structure. The final results are derived by fusing the adaptively normalized scores of the four systems with a two-stage fusion method, which achieves a minimum of the detection cost function (minDCF) of 0.3407 and an equal error rate (EER) of 2.67% on the development set of the challenge.</p></div>
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cptablecelltopbottomspace2|k
|cpsessionlisttable|k
|^<div class="cpsessionlistsessioncode">[[Mon-1-1|SESSION Mon-1-1 — ASR Neural Network Architectures I]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-2|SESSION Mon-1-2 — Multi-Channel Speech Enhancement]]</div> |^<div class="cpsessionlistsessionname">Multi-Channel Speech Enhancement</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-3|SESSION Mon-1-3 — Speech Processing in the Brain]]</div> |^<div class="cpsessionlistsessionname">Speech Processing in the Brain</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-4|SESSION Mon-1-4 — Speech Signal Representation]]</div> |^<div class="cpsessionlistsessionname">Speech Signal Representation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-5|SESSION Mon-1-5 — Speech Synthesis: Neural Waveform Generation I]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Neural Waveform Generation I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-SS-1-6|SESSION Mon-SS-1-6 — Automatic Speech Recognition for Non-Native Children’s Speech]]</div> |^<div class="cpsessionlistsessionname">Automatic Speech Recognition for Non-Native Children&#8217;s Speech</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-7|SESSION Mon-1-7 — Speaker Diarization]]</div> |^<div class="cpsessionlistsessionname">Speaker Diarization</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-8|SESSION Mon-1-8 — Noise Robust and Distant Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Noise Robust and Distant Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-9|SESSION Mon-1-9 — Speech in Multimodality]]</div> |^<div class="cpsessionlistsessionname">Speech in Multimodality</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-10|SESSION Mon-1-10 — Speech, Language, and Multimodal Resources]]</div> |^<div class="cpsessionlistsessionname">Speech, Language, and Multimodal Resources</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-11|SESSION Mon-1-11 — Language Recognition]]</div> |^<div class="cpsessionlistsessionname">Language Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-1-12|SESSION Mon-1-12 — Speech Processing and Analysis]]</div> |^<div class="cpsessionlistsessionname">Speech Processing and Analysis</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-1|SESSION Mon-2-1 — Speech Emotion Recognition I]]</div> |^<div class="cpsessionlistsessionname">Speech Emotion Recognition I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-2|SESSION Mon-2-2 — ASR Neural Network Architectures and Training I]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures and Training I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-3|SESSION Mon-2-3 — Evaluation of Speech Technology Systems and Methods for Resource Construction and Annotation]]</div> |^<div class="cpsessionlistsessionname">Evaluation of Speech Technology Systems and Methods for Resource Construction and Annotation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-4|SESSION Mon-2-4 — Phonetics and Phonology]]</div> |^<div class="cpsessionlistsessionname">Phonetics and Phonology</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-5|SESSION Mon-2-5 — Topics in ASR I]]</div> |^<div class="cpsessionlistsessionname">Topics in ASR I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-SS-2-6|SESSION Mon-SS-2-6 — Large-Scale Evaluation of Short-Duration Speaker Verification]]</div> |^<div class="cpsessionlistsessionname">Large-Scale Evaluation of Short-Duration Speaker Verification</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-7|SESSION Mon-2-7 — Voice Conversion and Adaptation I]]</div> |^<div class="cpsessionlistsessionname">Voice Conversion and Adaptation I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-8|SESSION Mon-2-8 — Acoustic Event Detection]]</div> |^<div class="cpsessionlistsessionname">Acoustic Event Detection</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-9|SESSION Mon-2-9 — Spoken Language Understanding I]]</div> |^<div class="cpsessionlistsessionname">Spoken Language Understanding I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-10|SESSION Mon-2-10 — DNN Architectures for Speaker Recognition]]</div> |^<div class="cpsessionlistsessionname">DNN Architectures for Speaker Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-11|SESSION Mon-2-11 — ASR Model Training and Strategies]]</div> |^<div class="cpsessionlistsessionname">ASR Model Training and Strategies</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-2-12|SESSION Mon-2-12 — Speech Annotation and Speech Assessment]]</div> |^<div class="cpsessionlistsessionname">Speech Annotation and Speech Assessment</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-1|SESSION Mon-3-1 — Cross/Multi-Lingual and Code-Switched Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Cross/Multi-Lingual and Code-Switched Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-2|SESSION Mon-3-2 — Anti-Spoofing and Liveness Detection]]</div> |^<div class="cpsessionlistsessionname">Anti-Spoofing and Liveness Detection</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-3|SESSION Mon-3-3 — Noise Reduction and Intelligibility]]</div> |^<div class="cpsessionlistsessionname">Noise Reduction and Intelligibility</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-4|SESSION Mon-3-4 — Acoustic Scene Classification]]</div> |^<div class="cpsessionlistsessionname">Acoustic Scene Classification</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-5|SESSION Mon-3-5 — Singing Voice Computing and Processing in Music]]</div> |^<div class="cpsessionlistsessionname">Singing Voice Computing and Processing in Music</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-7|SESSION Mon-3-7 — Acoustic Model Adaptation for ASR]]</div> |^<div class="cpsessionlistsessionname">Acoustic Model Adaptation for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-8|SESSION Mon-3-8 — Singing and Multimodal Synthesis]]</div> |^<div class="cpsessionlistsessionname">Singing and Multimodal Synthesis</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-9|SESSION Mon-3-9 — Intelligibility-Enhancing Speech Modification]]</div> |^<div class="cpsessionlistsessionname">Intelligibility-Enhancing Speech Modification</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-10|SESSION Mon-3-10 — Human Speech Production I]]</div> |^<div class="cpsessionlistsessionname">Human Speech Production I</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-3-11|SESSION Mon-3-11 — Targeted Source Separation]]</div> |^<div class="cpsessionlistsessionname">Targeted Source Separation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-1|SESSION Tue-1-1 — Speech Translation and Multilingual/Multimodal Learning]]</div> |^<div class="cpsessionlistsessionname">Speech Translation and Multilingual/Multimodal Learning</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-2|SESSION Tue-1-2 — Speaker Recognition I]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition I</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-3|SESSION Tue-1-3 — Spoken Language Understanding II]]</div> |^<div class="cpsessionlistsessionname">Spoken Language Understanding II</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-4|SESSION Tue-1-4 — Human Speech Processing]]</div> |^<div class="cpsessionlistsessionname">Human Speech Processing</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-5|SESSION Tue-1-5 — Feature Extraction and Distant ASR]]</div> |^<div class="cpsessionlistsessionname">Feature Extraction and Distant ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-SS-1-6|SESSION Tue-SS-1-6 — Voice Privacy Challenge]]</div> |^<div class="cpsessionlistsessionname">Voice Privacy Challenge</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-7|SESSION Tue-1-7 — Speech Synthesis: Text Processing, Data and Evaluation]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Text Processing, Data and Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-8|SESSION Tue-1-8 — Search for Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Search for Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-9|SESSION Tue-1-9 — Computational Paralinguistics I]]</div> |^<div class="cpsessionlistsessionname">Computational Paralinguistics I</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-1-10|SESSION Tue-1-10 — Acoustic Phonetics and Prosody]]</div> |^<div class="cpsessionlistsessionname">Acoustic Phonetics and Prosody</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-1|SESSION Wed-1-1 — Tonal Aspects of Acoustic Phonetics and Prosody]]</div> |^<div class="cpsessionlistsessionname">Tonal Aspects of Acoustic Phonetics and Prosody</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-2|SESSION Wed-1-2 — Speech Classification]]</div> |^<div class="cpsessionlistsessionname">Speech Classification</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-3|SESSION Wed-1-3 — Speech Synthesis Paradigms and Methods I]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis Paradigms and Methods I</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-1-4|SESSION Wed-SS-1-4 — The INTERSPEECH 2020 Computational Paralinguistics ChallengE (ComParE)]]</div> |^<div class="cpsessionlistsessionname">The INTERSPEECH 2020 Computational Paralinguistics ChallengE (ComParE)</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-5|SESSION Wed-1-5 — Streaming ASR]]</div> |^<div class="cpsessionlistsessionname">Streaming ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-1-6|SESSION Wed-SS-1-6 — Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div> |^<div class="cpsessionlistsessionname">Alzheimer&#8217;s Dementia Recognition Through Spontaneous Speech</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-7|SESSION Wed-1-7 — Speaker Recognition Challenges and Applications]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition Challenges and Applications</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-8|SESSION Wed-1-8 — Applications of ASR]]</div> |^<div class="cpsessionlistsessionname">Applications of ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-9|SESSION Wed-1-9 — Speech Emotion Recognition II]]</div> |^<div class="cpsessionlistsessionname">Speech Emotion Recognition II</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-10|SESSION Wed-1-10 — Bi- and Multilinguality]]</div> |^<div class="cpsessionlistsessionname">Bi- and Multilinguality</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-1-11|SESSION Wed-1-11 — Single-Channel Speech Enhancement I]]</div> |^<div class="cpsessionlistsessionname">Single-Channel Speech Enhancement I</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-1-12|SESSION Wed-SS-1-12 — Deep Noise Suppression Challenge]]</div> |^<div class="cpsessionlistsessionname">Deep Noise Suppression Challenge</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-1|SESSION Wed-2-1 — Voice and Hearing Disorders]]</div> |^<div class="cpsessionlistsessionname">Voice and Hearing Disorders</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-2|SESSION Wed-2-2 — Spoken Term Detection]]</div> |^<div class="cpsessionlistsessionname">Spoken Term Detection</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-2-3|SESSION Wed-SS-2-3 — The Fearless Steps Challenge Phase-02]]</div> |^<div class="cpsessionlistsessionname">The Fearless Steps Challenge Phase-02</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-4|SESSION Wed-2-4 — Monaural Source Separation]]</div> |^<div class="cpsessionlistsessionname">Monaural Source Separation</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-5|SESSION Wed-2-5 — Single-Channel Speech Enhancement II]]</div> |^<div class="cpsessionlistsessionname">Single-Channel Speech Enhancement II</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-6|SESSION Wed-2-6 — Topics in ASR II]]</div> |^<div class="cpsessionlistsessionname">Topics in ASR II</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-2-7|SESSION Wed-SS-2-7 — Neural Signals for Spoken Communication]]</div> |^<div class="cpsessionlistsessionname">Neural Signals for Spoken Communication</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-8|SESSION Wed-2-8 — Training Strategies for ASR]]</div> |^<div class="cpsessionlistsessionname">Training Strategies for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-9|SESSION Wed-2-9 — Speech Transmission & Coding]]</div> |^<div class="cpsessionlistsessionname">Speech Transmission &amp; Coding</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-10|SESSION Wed-2-10 — Bioacoustics and Articulation]]</div> |^<div class="cpsessionlistsessionname">Bioacoustics and Articulation</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-11|SESSION Wed-2-11 — Speech Synthesis: Multilingual and Cross-Lingual Approaches]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Multilingual and Cross-Lingual Approaches</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-2-12|SESSION Wed-2-12 — Learning Techniques for Speaker Recognition I]]</div> |^<div class="cpsessionlistsessionname">Learning Techniques for Speaker Recognition I</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-1|SESSION Wed-3-1 — Pronunciation]]</div> |^<div class="cpsessionlistsessionname">Pronunciation</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-2|SESSION Wed-3-2 — Diarization]]</div> |^<div class="cpsessionlistsessionname">Diarization</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-3|SESSION Wed-3-3 — Computational Paralinguistics II]]</div> |^<div class="cpsessionlistsessionname">Computational Paralinguistics II</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-4|SESSION Wed-3-4 — Speech Synthesis Paradigms and Methods II]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis Paradigms and Methods II</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-5|SESSION Wed-3-5 — Speaker Embedding]]</div> |^<div class="cpsessionlistsessionname">Speaker Embedding</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-7|SESSION Wed-3-7 — Single-Channel Speech Enhancement III]]</div> |^<div class="cpsessionlistsessionname">Single-Channel Speech Enhancement III</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-8|SESSION Wed-3-8 — Multi-Channel Audio and Emotion Recognition]]</div> |^<div class="cpsessionlistsessionname">Multi-Channel Audio and Emotion Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-9|SESSION Wed-3-9 — Computational Resource Constrained Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Computational Resource Constrained Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-10|SESSION Wed-3-10 — Speech Synthesis: Prosody and Emotion]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Prosody and Emotion</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-3-11|SESSION Wed-SS-3-11 — The Interspeech 2020 Far Field Speaker Verification Challenge]]</div> |^<div class="cpsessionlistsessionname">The Interspeech 2020 Far Field Speaker Verification Challenge</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-3-12|SESSION Wed-3-12 — Multimodal Speech Processing]]</div> |^<div class="cpsessionlistsessionname">Multimodal Speech Processing</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-1|SESSION Thu-1-1 — Speech Synthesis: Neural Waveform Generation II]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Neural Waveform Generation II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-2|SESSION Thu-1-2 — ASR Neural Network Architectures and Training II]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures and Training II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-3|SESSION Thu-1-3 — Neural Networks for Language Modeling]]</div> |^<div class="cpsessionlistsessionname">Neural Networks for Language Modeling</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-4|SESSION Thu-1-4 — Phonetic Event Detection and Segmentation]]</div> |^<div class="cpsessionlistsessionname">Phonetic Event Detection and Segmentation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-5|SESSION Thu-1-5 — Human Speech Production II]]</div> |^<div class="cpsessionlistsessionname">Human Speech Production II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-SS-1-6|SESSION Thu-SS-1-6 — New Trends in Self-Supervised Speech Processing]]</div> |^<div class="cpsessionlistsessionname">New Trends in Self-Supervised Speech Processing</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-7|SESSION Thu-1-7 — Learning Techniques for Speaker Recognition II]]</div> |^<div class="cpsessionlistsessionname">Learning Techniques for Speaker Recognition II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-8|SESSION Thu-1-8 — Spoken Language Evaluation]]</div> |^<div class="cpsessionlistsessionname">Spoken Language Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-9|SESSION Thu-1-9 — Spoken Dialogue System]]</div> |^<div class="cpsessionlistsessionname">Spoken Dialogue System</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-10|SESSION Thu-1-10 — Dereverberation and Echo Cancellation]]</div> |^<div class="cpsessionlistsessionname">Dereverberation and Echo Cancellation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-1-11|SESSION Thu-1-11 — Speech Synthesis: Toward End-to-End Synthesis]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Toward End-to-End Synthesis</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-1|SESSION Thu-2-1 — Speech Enhancement, Bandwidth Extension and Hearing Aids]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement, Bandwidth Extension and Hearing Aids</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-2|SESSION Thu-2-2 — Speech Emotion Recognition III]]</div> |^<div class="cpsessionlistsessionname">Speech Emotion Recognition III</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-3|SESSION Thu-2-3 — Accoustic Phonetics of L1-L2 and Other Interactions]]</div> |^<div class="cpsessionlistsessionname">Accoustic Phonetics of L1-L2 and Other Interactions</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-4|SESSION Thu-2-4 — Conversational Systems]]</div> |^<div class="cpsessionlistsessionname">Conversational Systems</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-SS-2-5|SESSION Thu-SS-2-5 — The Attacker’s Perpective on Automatic Speaker Verification]]</div> |^<div class="cpsessionlistsessionname">The Attacker&#8217;s Perpective on Automatic Speaker Verification</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-6|SESSION Thu-2-6 — Summarization, Semantic Analysis and Classification]]</div> |^<div class="cpsessionlistsessionname">Summarization, Semantic Analysis and Classification</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-7|SESSION Thu-2-7 — Speaker Recognition II]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-8|SESSION Thu-2-8 — General Topics in Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">General Topics in Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-9|SESSION Thu-2-9 — Speech Synthesis: Prosody Modeling]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Prosody Modeling</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-10|SESSION Thu-2-10 — Language Learning]]</div> |^<div class="cpsessionlistsessionname">Language Learning</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-2-11|SESSION Thu-2-11 — Speech Enhancement]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-1|SESSION Thu-3-1 — Speech in Health II]]</div> |^<div class="cpsessionlistsessionname">Speech in Health II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-2|SESSION Thu-3-2 — Speech and Audio Quality Assessment]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Quality Assessment</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-3|SESSION Thu-3-3 — Privacy and Security in Speech Communication]]</div> |^<div class="cpsessionlistsessionname">Privacy and Security in Speech Communication</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-4|SESSION Thu-3-4 — Voice Conversion and Adaptation II]]</div> |^<div class="cpsessionlistsessionname">Voice Conversion and Adaptation II</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-5|SESSION Thu-3-5 — Multilingual and Code-Switched ASR]]</div> |^<div class="cpsessionlistsessionname">Multilingual and Code-Switched ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-6|SESSION Thu-3-6 — Speech and Voice Disorders]]</div> |^<div class="cpsessionlistsessionname">Speech and Voice Disorders</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-7|SESSION Thu-3-7 — The Zero Resource Speech Challenge 2020]]</div> |^<div class="cpsessionlistsessionname">The Zero Resource Speech Challenge 2020</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-8|SESSION Thu-3-8 — LM Adaptation, Lexical Units and Punctuation]]</div> |^<div class="cpsessionlistsessionname">LM Adaptation, Lexical Units and Punctuation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-9|SESSION Thu-3-9 — Speech in Health I]]</div> |^<div class="cpsessionlistsessionname">Speech in Health I</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-10|SESSION Thu-3-10 — ASR Neural Network Architectures II — Transformers]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures II &#8212; Transformers</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-3-11|SESSION Thu-3-11 — Spatial Audio]]</div> |^<div class="cpsessionlistsessionname">Spatial Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Ralf Schlüter|
|^&nbsp;|^Yanhua Long|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-1|PAPER Mon-1-1-1 — On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">On the Comparison of Popular End-to-End Models for Large Scale Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jinyu Li|AUTHOR Jinyu Li]], [[Yu Wu|AUTHOR Yu Wu]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Chengyi Wang|AUTHOR Chengyi Wang]], [[Rui Zhao|AUTHOR Rui Zhao]], [[Shujie Liu|AUTHOR Shujie Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2471.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-2|PAPER Mon-1-1-2 — SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">SAN-M: Memory Equipped Self-Attention for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhifu Gao|AUTHOR Zhifu Gao]], [[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Ming Lei|AUTHOR Ming Lei]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-3|PAPER Mon-1-1-3 — Contextual RNN-T for Open Domain ASR]]</div>|<div class="cpsessionviewpapertitle">Contextual RNN-T for Open Domain ASR</div><div class="cpsessionviewpaperauthor">[[Mahaveer Jain|AUTHOR Mahaveer Jain]], [[Gil Keren|AUTHOR Gil Keren]], [[Jay Mahadeokar|AUTHOR Jay Mahadeokar]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]], [[Florian Metze|AUTHOR Florian Metze]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2947.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-4|PAPER Mon-1-1-4 — ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">ASAPP-ASR: Multistream CNN and Self-Attentive SRU for SOTA Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jing Pan|AUTHOR Jing Pan]], [[Joshua Shapiro|AUTHOR Joshua Shapiro]], [[Jeremy Wohlwend|AUTHOR Jeremy Wohlwend]], [[Kyu J. Han|AUTHOR Kyu J. Han]], [[Tao Lei|AUTHOR Tao Lei]], [[Tao Ma|AUTHOR Tao Ma]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-5|PAPER Mon-1-1-5 — Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity]]</div>|<div class="cpsessionviewpapertitle">Compressing LSTM Networks with Hierarchical Coarse-Grain Sparsity</div><div class="cpsessionviewpaperauthor">[[Deepak Kadetotad|AUTHOR Deepak Kadetotad]], [[Jian Meng|AUTHOR Jian Meng]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Chaitali Chakrabarti|AUTHOR Chaitali Chakrabarti]], [[Jae-sun Seo|AUTHOR Jae-sun Seo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2560.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-6|PAPER Mon-1-1-6 — BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example]]</div>|<div class="cpsessionviewpapertitle">BLSTM-Driven Stream Fusion for Automatic Speech Recognition: Novel Methods and a Multi-Size Window Fusion Example</div><div class="cpsessionviewpaperauthor">[[Timo Lohrenz|AUTHOR Timo Lohrenz]], [[Tim Fingscheidt|AUTHOR Tim Fingscheidt]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2526.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-7|PAPER Mon-1-1-7 — Relative Positional Encoding for Speech Recognition and Direct Translation]]</div>|<div class="cpsessionviewpapertitle">Relative Positional Encoding for Speech Recognition and Direct Translation</div><div class="cpsessionviewpaperauthor">[[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]], [[Thanh-Le Ha|AUTHOR Thanh-Le Ha]], [[Tuan-Nam Nguyen|AUTHOR Tuan-Nam Nguyen]], [[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]], [[Elizabeth Salesky|AUTHOR Elizabeth Salesky]], [[Sebastian Stüker|AUTHOR Sebastian Stüker]], [[Jan Niehues|AUTHOR Jan Niehues]], [[Alex Waibel|AUTHOR Alex Waibel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-8|PAPER Mon-1-1-8 — Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers]]</div>|<div class="cpsessionviewpapertitle">Joint Speaker Counting, Speech Recognition, and Speaker Identification for Overlapped Speech of any Number of Speakers</div><div class="cpsessionviewpaperauthor">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Tianyan Zhou|AUTHOR Tianyan Zhou]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1575.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-9|PAPER Mon-1-1-9 — Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework]]</div>|<div class="cpsessionviewpapertitle">Implicit Transfer of Privileged Acoustic Information in a Generalized Knowledge Distillation Framework</div><div class="cpsessionviewpaperauthor">[[Takashi Fukuda|AUTHOR Takashi Fukuda]], [[Samuel Thomas|AUTHOR Samuel Thomas]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-1-10|PAPER Mon-1-1-10 — Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Effect of Adding Positional Information on Convolutional Neural Networks for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jinhwan Park|AUTHOR Jinhwan Park]], [[Wonyong Sung|AUTHOR Wonyong Sung]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Shuai Nie|
|^&nbsp;|^Qiang Fang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-1|PAPER Mon-1-10-1 — ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment]]</div>|<div class="cpsessionviewpapertitle">ATCSpeech: A Multilingual Pilot-Controller Speech Corpus from Real Air Traffic Control Environment</div><div class="cpsessionviewpaperauthor">[[Bo Yang|AUTHOR Bo Yang]], [[Xianlong Tan|AUTHOR Xianlong Tan]], [[Zhengmao Chen|AUTHOR Zhengmao Chen]], [[Bing Wang|AUTHOR Bing Wang]], [[Min Ruan|AUTHOR Min Ruan]], [[Dan Li|AUTHOR Dan Li]], [[Zhongping Yang|AUTHOR Zhongping Yang]], [[Xiping Wu|AUTHOR Xiping Wu]], [[Yi Lin|AUTHOR Yi Lin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-2|PAPER Mon-1-10-2 — Developing an Open-Source Corpus of Yoruba Speech]]</div>|<div class="cpsessionviewpapertitle">Developing an Open-Source Corpus of Yoruba Speech</div><div class="cpsessionviewpaperauthor">[[Alexander Gutkin|AUTHOR Alexander Gutkin]], [[Işın Demirşahin|AUTHOR Işın Demirşahin]], [[Oddur Kjartansson|AUTHOR Oddur Kjartansson]], [[Clara Rivera|AUTHOR Clara Rivera]], [[Kọ́lá Túbọ̀sún|AUTHOR Kọ́lá Túbọ̀sún]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-3|PAPER Mon-1-10-3 — ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers]]</div>|<div class="cpsessionviewpapertitle">ClovaCall: Korean Goal-Oriented Dialog Speech Corpus for Automatic Speech Recognition of Contact Centers</div><div class="cpsessionviewpaperauthor">[[Jung-Woo Ha|AUTHOR Jung-Woo Ha]], [[Kihyun Nam|AUTHOR Kihyun Nam]], [[Jingu Kang|AUTHOR Jingu Kang]], [[Sang-Woo Lee|AUTHOR Sang-Woo Lee]], [[Sohee Yang|AUTHOR Sohee Yang]], [[Hyunhoon Jung|AUTHOR Hyunhoon Jung]], [[Hyeji Kim|AUTHOR Hyeji Kim]], [[Eunmi Kim|AUTHOR Eunmi Kim]], [[Soojin Kim|AUTHOR Soojin Kim]], [[Hyun Ah Kim|AUTHOR Hyun Ah Kim]], [[Kyoungtae Doh|AUTHOR Kyoungtae Doh]], [[Chan Kyu Lee|AUTHOR Chan Kyu Lee]], [[Nako Sung|AUTHOR Nako Sung]], [[Sunghun Kim|AUTHOR Sunghun Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-4|PAPER Mon-1-10-4 — LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR]]</div>|<div class="cpsessionviewpapertitle">LAIX Corpus of Chinese Learner English: Towards a Benchmark for L2 English ASR</div><div class="cpsessionviewpaperauthor">[[Yanhong Wang|AUTHOR Yanhong Wang]], [[Huan Luan|AUTHOR Huan Luan]], [[Jiahong Yuan|AUTHOR Jiahong Yuan]], [[Bin Wang|AUTHOR Bin Wang]], [[Hui Lin|AUTHOR Hui Lin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1988.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-5|PAPER Mon-1-10-5 — Design and Development of a Human-Machine Dialog Corpus for the Automated Assessment of Conversational English Proficiency]]</div>|<div class="cpsessionviewpapertitle">Design and Development of a Human-Machine Dialog Corpus for the Automated Assessment of Conversational English Proficiency</div><div class="cpsessionviewpaperauthor">[[Vikram Ramanarayanan|AUTHOR Vikram Ramanarayanan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-6|PAPER Mon-1-10-6 — CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment]]</div>|<div class="cpsessionviewpapertitle">CUCHILD: A Large-Scale Cantonese Corpus of Child Speech for Phonology and Articulation Assessment</div><div class="cpsessionviewpaperauthor">[[Si-Ioi Ng|AUTHOR Si-Ioi Ng]], [[Cymie Wing-Yee Ng|AUTHOR Cymie Wing-Yee Ng]], [[Jiarui Wang|AUTHOR Jiarui Wang]], [[Tan Lee|AUTHOR Tan Lee]], [[Kathy Yuet-Sheung Lee|AUTHOR Kathy Yuet-Sheung Lee]], [[Michael Chi-Fai Tong|AUTHOR Michael Chi-Fai Tong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-7|PAPER Mon-1-10-7 — FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics]]</div>|<div class="cpsessionviewpapertitle">FinChat: Corpus and Evaluation Setup for Finnish Chat Conversations on Everyday Topics</div><div class="cpsessionviewpaperauthor">[[Katri Leino|AUTHOR Katri Leino]], [[Juho Leinonen|AUTHOR Juho Leinonen]], [[Mittul Singh|AUTHOR Mittul Singh]], [[Sami Virpioja|AUTHOR Sami Virpioja]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-8|PAPER Mon-1-10-8 — DiPCo — Dinner Party Corpus]]</div>|<div class="cpsessionviewpapertitle">DiPCo — Dinner Party Corpus</div><div class="cpsessionviewpaperauthor">[[Maarten Van Segbroeck|AUTHOR Maarten Van Segbroeck]], [[Ahmed Zaid|AUTHOR Ahmed Zaid]], [[Ksenia Kutsenko|AUTHOR Ksenia Kutsenko]], [[Cirenia Huerta|AUTHOR Cirenia Huerta]], [[Tinh Nguyen|AUTHOR Tinh Nguyen]], [[Xuewen Luo|AUTHOR Xuewen Luo]], [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]], [[Jan Trmal|AUTHOR Jan Trmal]], [[Maurizio Omologo|AUTHOR Maurizio Omologo]], [[Roland Maas|AUTHOR Roland Maas]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3040.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-9|PAPER Mon-1-10-9 — Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews]]</div>|<div class="cpsessionviewpapertitle">Learning to Detect Bipolar Disorder and Borderline Personality Disorder with Language and Speech in Non-Clinical Interviews</div><div class="cpsessionviewpaperauthor">[[Bo Wang|AUTHOR Bo Wang]], [[Yue Wu|AUTHOR Yue Wu]], [[Niall Taylor|AUTHOR Niall Taylor]], [[Terry Lyons|AUTHOR Terry Lyons]], [[Maria Liakata|AUTHOR Maria Liakata]], [[Alejo J. Nevado-Holgado|AUTHOR Alejo J. Nevado-Holgado]], [[Kate E.A. Saunders|AUTHOR Kate E.A. Saunders]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-10-10|PAPER Mon-1-10-10 — FT SPEECH: Danish Parliament Speech Corpus]]</div>|<div class="cpsessionviewpapertitle">FT SPEECH: Danish Parliament Speech Corpus</div><div class="cpsessionviewpaperauthor">[[Andreas Kirkedal|AUTHOR Andreas Kirkedal]], [[Marija Stepanović|AUTHOR Marija Stepanović]], [[Barbara Plank|AUTHOR Barbara Plank]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Sriram Ganapathy|
|^&nbsp;|^Dong Wang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-1|PAPER Mon-1-11-1 — Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition]]</div>|<div class="cpsessionviewpapertitle">Metric Learning Loss Functions to Reduce Domain Mismatch in the x-Vector Space for Language Recognition</div><div class="cpsessionviewpaperauthor">[[Raphaël Duroselle|AUTHOR Raphaël Duroselle]], [[Denis Jouvet|AUTHOR Denis Jouvet]], [[Irina Illina|AUTHOR Irina Illina]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1923.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-2|PAPER Mon-1-11-2 — The XMUSPEECH System for the AP19-OLR Challenge]]</div>|<div class="cpsessionviewpapertitle">The XMUSPEECH System for the AP19-OLR Challenge</div><div class="cpsessionviewpaperauthor">[[Zheng Li|AUTHOR Zheng Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Jing Li|AUTHOR Jing Li]], [[Yiming Zhi|AUTHOR Yiming Zhi]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1960.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-3|PAPER Mon-1-11-3 — On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification]]</div>|<div class="cpsessionviewpapertitle">On the Usage of Multi-Feature Integration for Speaker Verification and Language Identification</div><div class="cpsessionviewpaperauthor">[[Zheng Li|AUTHOR Zheng Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Jing Li|AUTHOR Jing Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-4|PAPER Mon-1-11-4 — What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?]]</div>|<div class="cpsessionviewpapertitle">What Does an End-to-End Dialect Identification Model Learn About Non-Dialectal Information?</div><div class="cpsessionviewpaperauthor">[[Shammur A. Chowdhury|AUTHOR Shammur A. Chowdhury]], [[Ahmed Ali|AUTHOR Ahmed Ali]], [[Suwon Shon|AUTHOR Suwon Shon]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-5|PAPER Mon-1-11-5 — Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets]]</div>|<div class="cpsessionviewpapertitle">Releasing a Toolkit and Comparing the Performance of Language Embeddings Across Various Spoken Language Identification Datasets</div><div class="cpsessionviewpaperauthor">[[Matias Lindgren|AUTHOR Matias Lindgren]], [[Tommi Jauhiainen|AUTHOR Tommi Jauhiainen]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2906.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-6|PAPER Mon-1-11-6 — Learning Intonation Pattern Embeddings for Arabic Dialect Identification]]</div>|<div class="cpsessionviewpapertitle">Learning Intonation Pattern Embeddings for Arabic Dialect Identification</div><div class="cpsessionviewpaperauthor">[[Aitor Arronte Alvarez|AUTHOR Aitor Arronte Alvarez]], [[Elsayed Sabry Abdelaal Issa|AUTHOR Elsayed Sabry Abdelaal Issa]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-11-7|PAPER Mon-1-11-7 — Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages]]</div>|<div class="cpsessionviewpapertitle">Cross-Domain Adaptation of Spoken Language Identification for Related Languages: The Curious Case of Slavic Languages</div><div class="cpsessionviewpaperauthor">[[Badr M. Abdullah|AUTHOR Badr M. Abdullah]], [[Tania Avgustinova|AUTHOR Tania Avgustinova]], [[Bernd Möbius|AUTHOR Bernd Möbius]], [[Dietrich Klakow|AUTHOR Dietrich Klakow]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 12|<|
|^Chair:&nbsp;|^Ji Wu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-1|PAPER Mon-1-12-1 — ICE-Talk: An Interface for a Controllable Expressive Talking Machine]]</div>|<div class="cpsessionviewpapertitle">ICE-Talk: An Interface for a Controllable Expressive Talking Machine</div><div class="cpsessionviewpaperauthor">[[Noé Tits|AUTHOR Noé Tits]], [[Kevin El Haddad|AUTHOR Kevin El Haddad]], [[Thierry Dutoit|AUTHOR Thierry Dutoit]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-2|PAPER Mon-1-12-2 — Kaldi-Web: An Installation-Free, On-Device Speech Recognition System]]</div>|<div class="cpsessionviewpapertitle">Kaldi-Web: An Installation-Free, On-Device Speech Recognition System</div><div class="cpsessionviewpaperauthor">[[Mathieu Hu|AUTHOR Mathieu Hu]], [[Laurent Pierron|AUTHOR Laurent Pierron]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Denis Jouvet|AUTHOR Denis Jouvet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-3|PAPER Mon-1-12-3 — Soapbox Labs Verification Platform for Child Speech]]</div>|<div class="cpsessionviewpapertitle">Soapbox Labs Verification Platform for Child Speech</div><div class="cpsessionviewpaperauthor">[[Amelia C. Kelly|AUTHOR Amelia C. Kelly]], [[Eleni Karamichali|AUTHOR Eleni Karamichali]], [[Armin Saeb|AUTHOR Armin Saeb]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Nicholas Parslow|AUTHOR Nicholas Parslow]], [[Agape Deng|AUTHOR Agape Deng]], [[Arnaud Letondor|AUTHOR Arnaud Letondor]], [[Robert O’Regan|AUTHOR Robert O’Regan]], [[Qiru Zhou|AUTHOR Qiru Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-4|PAPER Mon-1-12-4 — SoapBox Labs Fluency Assessment Platform for Child Speech]]</div>|<div class="cpsessionviewpapertitle">SoapBox Labs Fluency Assessment Platform for Child Speech</div><div class="cpsessionviewpaperauthor">[[Amelia C. Kelly|AUTHOR Amelia C. Kelly]], [[Eleni Karamichali|AUTHOR Eleni Karamichali]], [[Armin Saeb|AUTHOR Armin Saeb]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Nicholas Parslow|AUTHOR Nicholas Parslow]], [[Gloria Montoya Gomez|AUTHOR Gloria Montoya Gomez]], [[Agape Deng|AUTHOR Agape Deng]], [[Arnaud Letondor|AUTHOR Arnaud Letondor]], [[Niall Mullally|AUTHOR Niall Mullally]], [[Adrian Hempel|AUTHOR Adrian Hempel]], [[Robert O’Regan|AUTHOR Robert O’Regan]], [[Qiru Zhou|AUTHOR Qiru Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4009.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-5|PAPER Mon-1-12-5 — CATOTRON — A Neural Text-to-Speech System in Catalan]]</div>|<div class="cpsessionviewpapertitle">CATOTRON — A Neural Text-to-Speech System in Catalan</div><div class="cpsessionviewpaperauthor">[[Baybars Külebi|AUTHOR Baybars Külebi]], [[Alp Öktem|AUTHOR Alp Öktem]], [[Alex Peiró-Lilja|AUTHOR Alex Peiró-Lilja]], [[Santiago Pascual|AUTHOR Santiago Pascual]], [[Mireia Farrús|AUTHOR Mireia Farrús]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-6|PAPER Mon-1-12-6 — Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology]]</div>|<div class="cpsessionviewpapertitle">Toward Remote Patient Monitoring of Speech, Video, Cognitive and Respiratory Biomarkers Using Multimodal Dialog Technology</div><div class="cpsessionviewpaperauthor">[[Vikram Ramanarayanan|AUTHOR Vikram Ramanarayanan]], [[Oliver Roesler|AUTHOR Oliver Roesler]], [[Michael Neumann|AUTHOR Michael Neumann]], [[David Pautler|AUTHOR David Pautler]], [[Doug Habberstad|AUTHOR Doug Habberstad]], [[Andrew Cornish|AUTHOR Andrew Cornish]], [[Hardik Kothare|AUTHOR Hardik Kothare]], [[Vignesh Murali|AUTHOR Vignesh Murali]], [[Jackson Liscombe|AUTHOR Jackson Liscombe]], [[Dirk Schnelle-Walka|AUTHOR Dirk Schnelle-Walka]], [[Patrick Lange|AUTHOR Patrick Lange]], [[David Suendermann-Oeft|AUTHOR David Suendermann-Oeft]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-12-7|PAPER Mon-1-12-7 — VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch]]</div>|<div class="cpsessionviewpapertitle">VoiceID on the Fly: A Speaker Recognition System that Learns from Scratch</div><div class="cpsessionviewpaperauthor">[[Baihan Lin|AUTHOR Baihan Lin]], [[Xinxin Zhang|AUTHOR Xinxin Zhang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Ying-Hui Lai|
|^&nbsp;|^Xiaolei Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-1|PAPER Mon-1-2-1 — Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Guanjun Li|AUTHOR Guanjun Li]], [[Shan Liang|AUTHOR Shan Liang]], [[Shuai Nie|AUTHOR Shuai Nie]], [[Wenju Liu|AUTHOR Wenju Liu]], [[Zhanlei Yang|AUTHOR Zhanlei Yang]], [[Longshuai Xiao|AUTHOR Longshuai Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-2|PAPER Mon-1-2-2 — Neural Spatio-Temporal Beamformer for Target Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Neural Spatio-Temporal Beamformer for Target Speech Separation</div><div class="cpsessionviewpaperauthor">[[Yong Xu|AUTHOR Yong Xu]], [[Meng Yu|AUTHOR Meng Yu]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Chao Weng|AUTHOR Chao Weng]], [[Jianming Liu|AUTHOR Jianming Liu]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1484.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-3|PAPER Mon-1-2-3 — Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis]]</div>|<div class="cpsessionviewpapertitle">Online Directional Speech Enhancement Using Geometrically Constrained Independent Vector Analysis</div><div class="cpsessionviewpaperauthor">[[Li Li|AUTHOR Li Li]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]], [[Shoji Makino|AUTHOR Shoji Makino]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-4|PAPER Mon-1-2-4 — End-to-End Multi-Look Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">End-to-End Multi-Look Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Meng Yu|AUTHOR Meng Yu]], [[Xuan Ji|AUTHOR Xuan Ji]], [[Bo Wu|AUTHOR Bo Wu]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-5|PAPER Mon-1-2-5 — Differential Beamforming for Uniform Circular Array with Directional Microphones]]</div>|<div class="cpsessionviewpapertitle">Differential Beamforming for Uniform Circular Array with Directional Microphones</div><div class="cpsessionviewpaperauthor">[[Weilong Huang|AUTHOR Weilong Huang]], [[Jinwei Feng|AUTHOR Jinwei Feng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1900.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-6|PAPER Mon-1-2-6 — Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Exploring Deep Hybrid Tensor-to-Vector Network Architectures for Regression Based Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Jun Qi|AUTHOR Jun Qi]], [[Hu Hu|AUTHOR Hu Hu]], [[Yannan Wang|AUTHOR Yannan Wang]], [[Chao-Han Huck Yang|AUTHOR Chao-Han Huck Yang]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1981.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-7|PAPER Mon-1-2-7 — An End-to-End Architecture of Online Multi-Channel Speech Separation]]</div>|<div class="cpsessionviewpapertitle">An End-to-End Architecture of Online Multi-Channel Speech Separation</div><div class="cpsessionviewpaperauthor">[[Jian Wu|AUTHOR Jian Wu]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]], [[Zhili Tan|AUTHOR Zhili Tan]], [[Edward Lin|AUTHOR Edward Lin]], [[Yi Luo|AUTHOR Yi Luo]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-8|PAPER Mon-1-2-8 — Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation]]</div>|<div class="cpsessionviewpapertitle">Mentoring-Reverse Mentoring for Unsupervised Multi-Channel Speech Source Separation</div><div class="cpsessionviewpaperauthor">[[Yu Nakagome|AUTHOR Yu Nakagome]], [[Masahito Togami|AUTHOR Masahito Togami]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2138.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-9|PAPER Mon-1-2-9 — Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation]]</div>|<div class="cpsessionviewpapertitle">Computationally Efficient and Versatile Framework for Joint Optimization of Blind Speech Separation and Dereverberation</div><div class="cpsessionviewpaperauthor">[[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Rintaro Ikeshita|AUTHOR Rintaro Ikeshita]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Hiroshi Sawada|AUTHOR Hiroshi Sawada]], [[Shoko Araki|AUTHOR Shoko Araki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-2-10|PAPER Mon-1-2-10 — A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge]]</div>|<div class="cpsessionviewpapertitle">A Space-and-Speaker-Aware Iterative Mask Estimation Approach to Multi-Channel Speech Recognition in the CHiME-6 Challenge</div><div class="cpsessionviewpaperauthor">[[Yan-Hui Tu|AUTHOR Yan-Hui Tu]], [[Jun Du|AUTHOR Jun Du]], [[Lei Sun|AUTHOR Lei Sun]], [[Feng Ma|AUTHOR Feng Ma]], [[Jia Pan|AUTHOR Jia Pan]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Hans Rutger Bosker|
|^&nbsp;|^Haifeng Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-1|PAPER Mon-1-3-1 — Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation]]</div>|<div class="cpsessionviewpapertitle">Identifying Causal Relationships Between Behavior and Local Brain Activity During Natural Conversation</div><div class="cpsessionviewpaperauthor">[[Hmamouche Youssef|AUTHOR Hmamouche Youssef]], [[Prévot Laurent|AUTHOR Prévot Laurent]], [[Ochs Magalie|AUTHOR Ochs Magalie]], [[Chaminade Thierry|AUTHOR Chaminade Thierry]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-2|PAPER Mon-1-3-2 — Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals]]</div>|<div class="cpsessionviewpapertitle">Neural Entrainment to Natural Speech Envelope Based on Subject Aligned EEG Signals</div><div class="cpsessionviewpaperauthor">[[Di Zhou|AUTHOR Di Zhou]], [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Shuang Wu|AUTHOR Shuang Wu]], [[Zhuo Zhang|AUTHOR Zhuo Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2490.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-3|PAPER Mon-1-3-3 — Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell]]</div>|<div class="cpsessionviewpapertitle">Does Lexical Retrieval Deteriorate in Patients with Mild Cognitive Impairment? Analysis of Brain Functional Network Will Tell</div><div class="cpsessionviewpaperauthor">[[Chongyuan Lian|AUTHOR Chongyuan Lian]], [[Tianqi Wang|AUTHOR Tianqi Wang]], [[Mingxiao Gu|AUTHOR Mingxiao Gu]], [[Manwa L. Ng|AUTHOR Manwa L. Ng]], [[Feiqi Zhu|AUTHOR Feiqi Zhu]], [[Lan Wang|AUTHOR Lan Wang]], [[Nan Yan|AUTHOR Nan Yan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-4|PAPER Mon-1-3-4 — Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention]]</div>|<div class="cpsessionviewpapertitle">Congruent Audiovisual Speech Enhances Cortical Envelope Tracking During Auditory Selective Attention</div><div class="cpsessionviewpaperauthor">[[Zhen Fu|AUTHOR Zhen Fu]], [[Jing Chen|AUTHOR Jing Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1652.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-5|PAPER Mon-1-3-5 — Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions]]</div>|<div class="cpsessionviewpapertitle">Contribution of RMS-Level-Based Speech Segments to Target Speech Decoding Under Noisy Conditions</div><div class="cpsessionviewpaperauthor">[[Lei Wang|AUTHOR Lei Wang]], [[Ed X. Wu|AUTHOR Ed X. Wu]], [[Fei Chen|AUTHOR Fei Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1633.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-6|PAPER Mon-1-3-6 — Cortical Oscillatory Hierarchy for Natural Sentence Processing]]</div>|<div class="cpsessionviewpapertitle">Cortical Oscillatory Hierarchy for Natural Sentence Processing</div><div class="cpsessionviewpaperauthor">[[Bin Zhao|AUTHOR Bin Zhao]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]], [[Masashi Unoki|AUTHOR Masashi Unoki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2450.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-7|PAPER Mon-1-3-7 — Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment]]</div>|<div class="cpsessionviewpapertitle">Comparing EEG Analyses with Different Epoch Alignments in an Auditory Lexical Decision Experiment</div><div class="cpsessionviewpaperauthor">[[Louis ten Bosch|AUTHOR Louis ten Bosch]], [[Kimberley Mulder|AUTHOR Kimberley Mulder]], [[Lou Boves|AUTHOR Lou Boves]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2651.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-3-8|PAPER Mon-1-3-8 — Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait]]</div>|<div class="cpsessionviewpapertitle">Detection of Subclinical Mild Traumatic Brain Injury (mTBI) Through Speech and Gait</div><div class="cpsessionviewpaperauthor">[[Tanya Talkar|AUTHOR Tanya Talkar]], [[Sophia Yuditskaya|AUTHOR Sophia Yuditskaya]], [[James R. Williamson|AUTHOR James R. Williamson]], [[Adam C. Lammert|AUTHOR Adam C. Lammert]], [[Hrishikesh Rao|AUTHOR Hrishikesh Rao]], [[Daniel Hannon|AUTHOR Daniel Hannon]], [[Anne O’Brien|AUTHOR Anne O’Brien]], [[Gloria Vergara-Diaz|AUTHOR Gloria Vergara-Diaz]], [[Richard DeLaura|AUTHOR Richard DeLaura]], [[Douglas Sturim|AUTHOR Douglas Sturim]], [[Gregory Ciccarelli|AUTHOR Gregory Ciccarelli]], [[Ross Zafonte|AUTHOR Ross Zafonte]], [[Jeffrey Palmer|AUTHOR Jeffrey Palmer]], [[Paolo Bonato|AUTHOR Paolo Bonato]], [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Reinhold Haeb-Umbach|
|^&nbsp;|^Ken-Ichi Sakakibara|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-1|PAPER Mon-1-4-1 — Towards Learning a Universal Non-Semantic Representation of Speech]]</div>|<div class="cpsessionviewpapertitle">Towards Learning a Universal Non-Semantic Representation of Speech</div><div class="cpsessionviewpaperauthor">[[Joel Shor|AUTHOR Joel Shor]], [[Aren Jansen|AUTHOR Aren Jansen]], [[Ronnie Maor|AUTHOR Ronnie Maor]], [[Oran Lang|AUTHOR Oran Lang]], [[Omry Tuval|AUTHOR Omry Tuval]], [[Félix de Chaumont Quitry|AUTHOR Félix de Chaumont Quitry]], [[Marco Tagliasacchi|AUTHOR Marco Tagliasacchi]], [[Ira Shavitt|AUTHOR Ira Shavitt]], [[Dotan Emanuel|AUTHOR Dotan Emanuel]], [[Yinnon Haviv|AUTHOR Yinnon Haviv]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1794.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-2|PAPER Mon-1-4-2 — Poetic Meter Classification Using i-Vector-MTF Fusion]]</div>|<div class="cpsessionviewpapertitle">Poetic Meter Classification Using i-Vector-MTF Fusion</div><div class="cpsessionviewpaperauthor">[[Rajeev Rajan|AUTHOR Rajeev Rajan]], [[Aiswarya Vinod Kumar|AUTHOR Aiswarya Vinod Kumar]], [[Ben P. Babu|AUTHOR Ben P. Babu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-3|PAPER Mon-1-4-3 — Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism]]</div>|<div class="cpsessionviewpapertitle">Formant Tracking Using Dilated Convolutional Networks Through Dense Connection with Gating Mechanism</div><div class="cpsessionviewpaperauthor">[[Wang Dai|AUTHOR Wang Dai]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]], [[Yingming Gao|AUTHOR Yingming Gao]], [[Wei Wei|AUTHOR Wei Wei]], [[Dengfeng Ke|AUTHOR Dengfeng Ke]], [[Binghuai Lin|AUTHOR Binghuai Lin]], [[Yanlu Xie|AUTHOR Yanlu Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-4|PAPER Mon-1-4-4 — Automatic Analysis of Speech Prosody in Dutch]]</div>|<div class="cpsessionviewpapertitle">Automatic Analysis of Speech Prosody in Dutch</div><div class="cpsessionviewpaperauthor">[[Na Hu|AUTHOR Na Hu]], [[Berit Janssen|AUTHOR Berit Janssen]], [[Judith Hanssen|AUTHOR Judith Hanssen]], [[Carlos Gussenhoven|AUTHOR Carlos Gussenhoven]], [[Aoju Chen|AUTHOR Aoju Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2236.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-5|PAPER Mon-1-4-5 — Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting]]</div>|<div class="cpsessionviewpapertitle">Learning Voice Representation Using Knowledge Distillation for Automatic Voice Casting</div><div class="cpsessionviewpaperauthor">[[Adrien Gresse|AUTHOR Adrien Gresse]], [[Mathias Quillot|AUTHOR Mathias Quillot]], [[Richard Dufour|AUTHOR Richard Dufour]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-6|PAPER Mon-1-4-6 — Enhancing Formant Information in Spectrographic Display of Speech]]</div>|<div class="cpsessionviewpapertitle">Enhancing Formant Information in Spectrographic Display of Speech</div><div class="cpsessionviewpaperauthor">[[B. Yegnanarayana|AUTHOR B. Yegnanarayana]], [[Anand Joseph|AUTHOR Anand Joseph]], [[Vishala Pannala|AUTHOR Vishala Pannala]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-7|PAPER Mon-1-4-7 — Unsupervised Methods for Evaluating Speech Representations]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Methods for Evaluating Speech Representations</div><div class="cpsessionviewpaperauthor">[[Michael Gump|AUTHOR Michael Gump]], [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3019.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-8|PAPER Mon-1-4-8 — Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments]]</div>|<div class="cpsessionviewpapertitle">Robust Pitch Regression with Voiced/Unvoiced Classification in Nonstationary Noise Environments</div><div class="cpsessionviewpaperauthor">[[Dung N. Tran|AUTHOR Dung N. Tran]], [[Uros Batricevic|AUTHOR Uros Batricevic]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-9|PAPER Mon-1-4-9 — Nonlinear ISA with Auxiliary Variables for Learning Speech Representations]]</div>|<div class="cpsessionviewpapertitle">Nonlinear ISA with Auxiliary Variables for Learning Speech Representations</div><div class="cpsessionviewpaperauthor">[[Amrith Setlur|AUTHOR Amrith Setlur]], [[Barnabás Póczos|AUTHOR Barnabás Póczos]], [[Alan W. Black|AUTHOR Alan W. Black]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-4-10|PAPER Mon-1-4-10 — Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals]]</div>|<div class="cpsessionviewpapertitle">Harmonic Lowering for Accelerating Harmonic Convolution for Audio Signals</div><div class="cpsessionviewpaperauthor">[[Hirotoshi Takeuchi|AUTHOR Hirotoshi Takeuchi]], [[Kunio Kashino|AUTHOR Kunio Kashino]], [[Yasunori Ohishi|AUTHOR Yasunori Ohishi]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Sunayana Sitaram|
|^&nbsp;|^Paavo Alku|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-1|PAPER Mon-1-5-1 — Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders]]</div>|<div class="cpsessionviewpapertitle">Knowledge-and-Data-Driven Amplitude Spectrum Prediction for Hierarchical Neural Vocoders</div><div class="cpsessionviewpaperauthor">[[Yang Ai|AUTHOR Yang Ai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-2|PAPER Mon-1-5-2 — FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction]]</div>|<div class="cpsessionviewpapertitle">FeatherWave: An Efficient High-Fidelity Neural Vocoder with Multi-Band Linear Prediction</div><div class="cpsessionviewpaperauthor">[[Qiao Tian|AUTHOR Qiao Tian]], [[Zewang Zhang|AUTHOR Zewang Zhang]], [[Heng Lu|AUTHOR Heng Lu]], [[Ling-Hui Chen|AUTHOR Ling-Hui Chen]], [[Shan Liu|AUTHOR Shan Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-3|PAPER Mon-1-5-3 — VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network]]</div>|<div class="cpsessionviewpapertitle">VocGAN: A High-Fidelity Real-Time Vocoder with a Hierarchically-Nested Adversarial Network</div><div class="cpsessionviewpaperauthor">[[Jinhyeok Yang|AUTHOR Jinhyeok Yang]], [[Junmo Lee|AUTHOR Junmo Lee]], [[Youngik Kim|AUTHOR Youngik Kim]], [[Hoon-Young Cho|AUTHOR Hoon-Young Cho]], [[Injung Kim|AUTHOR Injung Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1642.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-4|PAPER Mon-1-5-4 — Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition]]</div>|<div class="cpsessionviewpapertitle">Lightweight LPCNet-Based Neural Vocoder with Tensor Decomposition</div><div class="cpsessionviewpaperauthor">[[Hiroki Kanagawa|AUTHOR Hiroki Kanagawa]], [[Yusuke Ijima|AUTHOR Yusuke Ijima]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1736.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-5|PAPER Mon-1-5-5 — WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU]]</div>|<div class="cpsessionviewpapertitle">WG-WaveNet: Real-Time High-Fidelity Speech Synthesis Without GPU</div><div class="cpsessionviewpaperauthor">[[Po-chun Hsu|AUTHOR Po-chun Hsu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2103.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-6|PAPER Mon-1-5-6 — What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS]]</div>|<div class="cpsessionviewpapertitle">What the Future Brings: Investigating the Impact of Lookahead for Incremental Neural TTS</div><div class="cpsessionviewpaperauthor">[[Brooke Stephenson|AUTHOR Brooke Stephenson]], [[Laurent Besacier|AUTHOR Laurent Besacier]], [[Laurent Girin|AUTHOR Laurent Girin]], [[Thomas Hueber|AUTHOR Thomas Hueber]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-7|PAPER Mon-1-5-7 — Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet]]</div>|<div class="cpsessionviewpapertitle">Fast and Lightweight On-Device TTS with Tacotron2 and LPCNet</div><div class="cpsessionviewpaperauthor">[[Vadim Popov|AUTHOR Vadim Popov]], [[Stanislav Kamenev|AUTHOR Stanislav Kamenev]], [[Mikhail Kudinov|AUTHOR Mikhail Kudinov]], [[Sergey Repyevsky|AUTHOR Sergey Repyevsky]], [[Tasnima Sadekova|AUTHOR Tasnima Sadekova]], [[Vitalii Bushaev|AUTHOR Vitalii Bushaev]], [[Vladimir Kryzhanovskiy|AUTHOR Vladimir Kryzhanovskiy]], [[Denis Parkhomenko|AUTHOR Denis Parkhomenko]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-8|PAPER Mon-1-5-8 — Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed]]</div>|<div class="cpsessionviewpapertitle">Efficient WaveGlow: An Improved WaveGlow Vocoder with Enhanced Speed</div><div class="cpsessionviewpaperauthor">[[Wei Song|AUTHOR Wei Song]], [[Guanghui Xu|AUTHOR Guanghui Xu]], [[Zhengchen Zhang|AUTHOR Zhengchen Zhang]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2596.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-9|PAPER Mon-1-5-9 — Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?]]</div>|<div class="cpsessionviewpapertitle">Can Auditory Nerve Models Tell us What’s Different About WaveNet Vocoded Speech?</div><div class="cpsessionviewpaperauthor">[[Sébastien Le Maguer|AUTHOR Sébastien Le Maguer]], [[Naomi Harte|AUTHOR Naomi Harte]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2786.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-10|PAPER Mon-1-5-10 — Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions]]</div>|<div class="cpsessionviewpapertitle">Speaker Conditional WaveRNN: Towards Universal Neural Vocoder for Unseen Speaker and Recording Conditions</div><div class="cpsessionviewpaperauthor">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]], [[Yannis Pantazis|AUTHOR Yannis Pantazis]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-5-11|PAPER Mon-1-5-11 — Neural Homomorphic Vocoder]]</div>|<div class="cpsessionviewpapertitle">Neural Homomorphic Vocoder</div><div class="cpsessionviewpaperauthor">[[Zhijun Liu|AUTHOR Zhijun Liu]], [[Kuan Chen|AUTHOR Kuan Chen]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Hagai Aronowitz|
|^&nbsp;|^Yu Wang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-1|PAPER Mon-1-7-1 — End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speaker Diarization for an Unknown Number of Speakers with Encoder-Decoder Based Attractors</div><div class="cpsessionviewpaperauthor">[[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Yawen Xue|AUTHOR Yawen Xue]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1602.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-2|PAPER Mon-1-7-2 — Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario]]</div>|<div class="cpsessionviewpapertitle">Target-Speaker Voice Activity Detection: A Novel Approach for Multi-Speaker Diarization in a Dinner Party Scenario</div><div class="cpsessionviewpaperauthor">[[Ivan Medennikov|AUTHOR Ivan Medennikov]], [[Maxim Korenevsky|AUTHOR Maxim Korenevsky]], [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]], [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]], [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]], [[Ivan Sorokin|AUTHOR Ivan Sorokin]], [[Tatiana Timofeeva|AUTHOR Tatiana Timofeeva]], [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]], [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Ivan Podluzhny|AUTHOR Ivan Podluzhny]], [[Aleksandr Laptev|AUTHOR Aleksandr Laptev]], [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-4|PAPER Mon-1-7-4 — New Advances in Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">New Advances in Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Hagai Aronowitz|AUTHOR Hagai Aronowitz]], [[Weizhong Zhu|AUTHOR Weizhong Zhu]], [[Masayuki Suzuki|AUTHOR Masayuki Suzuki]], [[Gakuto Kurata|AUTHOR Gakuto Kurata]], [[Ron Hoory|AUTHOR Ron Hoory]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1908.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-5|PAPER Mon-1-7-5 — Self-Attentive Similarity Measurement Strategies in Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">Self-Attentive Similarity Measurement Strategies in Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Qingjian Lin|AUTHOR Qingjian Lin]], [[Yu Hou|AUTHOR Yu Hou]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1950.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-6|PAPER Mon-1-7-6 — Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning]]</div>|<div class="cpsessionviewpapertitle">Speaker Attribution with Voice Profiles by Graph-Based Semi-Supervised Learning</div><div class="cpsessionviewpaperauthor">[[Jixuan Wang|AUTHOR Jixuan Wang]], [[Xiong Xiao|AUTHOR Xiong Xiao]], [[Jian Wu|AUTHOR Jian Wu]], [[Ranjani Ramamurthy|AUTHOR Ranjani Ramamurthy]], [[Frank Rudzicz|AUTHOR Frank Rudzicz]], [[Michael Brudno|AUTHOR Michael Brudno]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-7|PAPER Mon-1-7-7 — Deep Self-Supervised Hierarchical Clustering for Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">Deep Self-Supervised Hierarchical Clustering for Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Prachi Singh|AUTHOR Prachi Singh]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-7-8|PAPER Mon-1-7-8 — Spot the Conversation: Speaker Diarisation in the Wild]]</div>|<div class="cpsessionviewpapertitle">Spot the Conversation: Speaker Diarisation in the Wild</div><div class="cpsessionviewpaperauthor">[[Joon Son Chung|AUTHOR Joon Son Chung]], [[Jaesung Huh|AUTHOR Jaesung Huh]], [[Arsha Nagrani|AUTHOR Arsha Nagrani]], [[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]], [[Andrew Zisserman|AUTHOR Andrew Zisserman]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Yanmin Qian|
|^&nbsp;|^Ozlem Kalinli|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-1|PAPER Mon-1-8-1 — Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Learning Contextual Language Embeddings for Monaural Multi-Talker Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1504.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-2|PAPER Mon-1-8-2 — Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Double Adversarial Network Based Monaural Speech Enhancement for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhihao Du|AUTHOR Zhihao Du]], [[Jiqing Han|AUTHOR Jiqing Han]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1497.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-3|PAPER Mon-1-8-3 — Anti-Aliasing Regularization in Stacking Layers]]</div>|<div class="cpsessionviewpapertitle">Anti-Aliasing Regularization in Stacking Layers</div><div class="cpsessionviewpaperauthor">[[Antoine Bruguier|AUTHOR Antoine Bruguier]], [[Ananya Misra|AUTHOR Ananya Misra]], [[Arun Narayanan|AUTHOR Arun Narayanan]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-4|PAPER Mon-1-8-4 — Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription]]</div>|<div class="cpsessionviewpapertitle">Towards a Competitive End-to-End Speech Recognition for CHiME-6 Dinner Party Transcription</div><div class="cpsessionviewpaperauthor">[[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Aleksandr Laptev|AUTHOR Aleksandr Laptev]], [[Ivan Medennikov|AUTHOR Ivan Medennikov]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-5|PAPER Mon-1-8-5 — End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming]]</div>|<div class="cpsessionviewpapertitle">End-to-End Far-Field Speech Recognition with Unified Dereverberation and Beamforming</div><div class="cpsessionviewpaperauthor">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Aswin Shanmugam Subramanian|AUTHOR Aswin Shanmugam Subramanian]], [[Xuankai Chang|AUTHOR Xuankai Chang]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1682.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-6|PAPER Mon-1-8-6 — Quaternion Neural Networks for Multi-Channel Distant Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Quaternion Neural Networks for Multi-Channel Distant Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xinchi Qiu|AUTHOR Xinchi Qiu]], [[Titouan Parcollet|AUTHOR Titouan Parcollet]], [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]], [[Mohamed Morchid|AUTHOR Mohamed Morchid]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1606.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-7|PAPER Mon-1-8-7 — Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario]]</div>|<div class="cpsessionviewpapertitle">Improved Guided Source Separation Integrated with a Strong Back-End for the CHiME-6 Dinner Party Scenario</div><div class="cpsessionviewpaperauthor">[[Hangting Chen|AUTHOR Hangting Chen]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Qian Shi|AUTHOR Qian Shi]], [[Zuozhen Liu|AUTHOR Zuozhen Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1089.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-8|PAPER Mon-1-8-8 — Neural Speech Separation Using Spatially Distributed Microphones]]</div>|<div class="cpsessionviewpapertitle">Neural Speech Separation Using Spatially Distributed Microphones</div><div class="cpsessionviewpaperauthor">[[Dongmei Wang|AUTHOR Dongmei Wang]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1050.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-9|PAPER Mon-1-8-9 — Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones]]</div>|<div class="cpsessionviewpapertitle">Utterance-Wise Meeting Transcription System Using Asynchronous Distributed Microphones</div><div class="cpsessionviewpaperauthor">[[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2807.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-8-10|PAPER Mon-1-8-10 — Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset]]</div>|<div class="cpsessionviewpapertitle">Simulating Realistically-Spatialised Simultaneous Speech Using Video-Driven Speaker Detection and the CHiME-5 Dataset</div><div class="cpsessionviewpaperauthor">[[Jack Deadman|AUTHOR Jack Deadman]], [[Jon Barker|AUTHOR Jon Barker]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Dongyan Huang|
|^&nbsp;|^Zixing Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2926.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-1|PAPER Mon-1-9-1 — Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech]]</div>|<div class="cpsessionviewpapertitle">Toward Silent Paralinguistics: Speech-to-EMG — Retrieving Articulatory Muscle Activity from Speech</div><div class="cpsessionviewpaperauthor">[[Catarina Botelho|AUTHOR Catarina Botelho]], [[Lorenz Diener|AUTHOR Lorenz Diener]], [[Dennis Küster|AUTHOR Dennis Küster]], [[Kevin Scheck|AUTHOR Kevin Scheck]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Tanja Schultz|AUTHOR Tanja Schultz]], [[Alberto Abad|AUTHOR Alberto Abad]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2320.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-2|PAPER Mon-1-9-2 — Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features]]</div>|<div class="cpsessionviewpapertitle">Multimodal Deception Detection Using Automatically Extracted Acoustic, Visual, and Lexical Features</div><div class="cpsessionviewpaperauthor">[[Jiaxuan Zhang|AUTHOR Jiaxuan Zhang]], [[Sarah Ita Levitan|AUTHOR Sarah Ita Levitan]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1653.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-3|PAPER Mon-1-9-3 — Multi-Modal Attention for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Modal Attention for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Zexu Pan|AUTHOR Zexu Pan]], [[Zhaojie Luo|AUTHOR Zhaojie Luo]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-4|PAPER Mon-1-9-4 — WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">WISE: Word-Level Interaction-Based Multimodal Fusion for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Guang Shen|AUTHOR Guang Shen]], [[Riwei Lai|AUTHOR Riwei Lai]], [[Rui Chen|AUTHOR Rui Chen]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Kejia Zhang|AUTHOR Kejia Zhang]], [[Qilong Han|AUTHOR Qilong Han]], [[Hongtao Song|AUTHOR Hongtao Song]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-5|PAPER Mon-1-9-5 — A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">A Multi-Scale Fusion Framework for Bimodal Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Ming Chen|AUTHOR Ming Chen]], [[Xudong Zhao|AUTHOR Xudong Zhao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-6|PAPER Mon-1-9-6 — Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Group Gated Fusion on Attention-Based Bidirectional Alignment for Multimodal Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Pengfei Liu|AUTHOR Pengfei Liu]], [[Kun Li|AUTHOR Kun Li]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-7|PAPER Mon-1-9-7 — Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Modal Embeddings Using Multi-Task Learning for Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Aparna Khare|AUTHOR Aparna Khare]], [[Srinivas Parthasarathy|AUTHOR Srinivas Parthasarathy]], [[Shiva Sundaram|AUTHOR Shiva Sundaram]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-8|PAPER Mon-1-9-8 — Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network]]</div>|<div class="cpsessionviewpapertitle">Using Speaker-Aligned Graph Memory Block in Multimodally Attentive Emotion Recognition Network</div><div class="cpsessionviewpaperauthor">[[Jeng-Lin Li|AUTHOR Jeng-Lin Li]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1705.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-1-9-9|PAPER Mon-1-9-9 — Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]], [[Zhanlei Yang|AUTHOR Zhanlei Yang]], [[Rongjun Li|AUTHOR Rongjun Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Gábor Gosztolya|
|^&nbsp;|^Yongwei Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1869.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-1|PAPER Mon-2-1-1 — Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models]]</div>|<div class="cpsessionviewpapertitle">Enhancing Transferability of Black-Box Adversarial Attacks via Lifelong Learning for Speech Emotion Recognition Models</div><div class="cpsessionviewpaperauthor">[[Zhao Ren|AUTHOR Zhao Ren]], [[Jing Han|AUTHOR Jing Han]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-2|PAPER Mon-2-1-2 — End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speech Emotion Recognition Combined with Acoustic-to-Word ASR Model</div><div class="cpsessionviewpaperauthor">[[Han Feng|AUTHOR Han Feng]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1733.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-3|PAPER Mon-2-1-3 — Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network]]</div>|<div class="cpsessionviewpapertitle">Improving Speech Emotion Recognition Using Graph Attentive Bi-Directional Gated Recurrent Unit Network</div><div class="cpsessionviewpaperauthor">[[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Chun-Min Chang|AUTHOR Chun-Min Chang]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-4|PAPER Mon-2-1-4 — An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition]]</div>|<div class="cpsessionviewpapertitle">An Investigation of Cross-Cultural Semi-Supervised Learning for Continuous Affect Recognition</div><div class="cpsessionviewpaperauthor">[[Adria Mallol-Ragolta|AUTHOR Adria Mallol-Ragolta]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2694.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-5|PAPER Mon-2-1-5 — Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Ensemble of Students Taught by Probabilistic Teachers to Improve Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Kusha Sridhar|AUTHOR Kusha Sridhar]], [[Carlos Busso|AUTHOR Carlos Busso]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-6|PAPER Mon-2-1-6 — Augmenting Generative Adversarial Networks for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Augmenting Generative Adversarial Networks for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Siddique Latif|AUTHOR Siddique Latif]], [[Muhammad Asim|AUTHOR Muhammad Asim]], [[Rajib Rana|AUTHOR Rajib Rana]], [[Sara Khalifa|AUTHOR Sara Khalifa]], [[Raja Jurdak|AUTHOR Raja Jurdak]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1356.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-7|PAPER Mon-2-1-7 — Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Speech Emotion Recognition ‘in the Wild’ Using an Autoencoder</div><div class="cpsessionviewpaperauthor">[[Vipula Dissanayake|AUTHOR Vipula Dissanayake]], [[Haimo Zhang|AUTHOR Haimo Zhang]], [[Mark Billinghurst|AUTHOR Mark Billinghurst]], [[Suranga Nanayakkara|AUTHOR Suranga Nanayakkara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1771.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-8|PAPER Mon-2-1-8 — Emotion Profile Refinery for Speech Emotion Classification]]</div>|<div class="cpsessionviewpapertitle">Emotion Profile Refinery for Speech Emotion Classification</div><div class="cpsessionviewpaperauthor">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-1-9|PAPER Mon-2-1-9 — Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation]]</div>|<div class="cpsessionviewpapertitle">Speech Representation Learning for Emotion Recognition Using End-to-End ASR with Factorized Adaptation</div><div class="cpsessionviewpaperauthor">[[Sung-Lin Yeh|AUTHOR Sung-Lin Yeh]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Rosa González Hautamäki|
|^&nbsp;|^Zhijian Ou|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-1|PAPER Mon-2-10-1 — AutoSpeech: Neural Architecture Search for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">AutoSpeech: Neural Architecture Search for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Shaojin Ding|AUTHOR Shaojin Ding]], [[Tianlong Chen|AUTHOR Tianlong Chen]], [[Xinyu Gong|AUTHOR Xinyu Gong]], [[Weiwei Zha|AUTHOR Weiwei Zha]], [[Zhangyang Wang|AUTHOR Zhangyang Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-2|PAPER Mon-2-10-2 — Densely Connected Time Delay Neural Network for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Densely Connected Time Delay Neural Network for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Ya-Qi Yu|AUTHOR Ya-Qi Yu]], [[Wu-Jun Li|AUTHOR Wu-Jun Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1306.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-3|PAPER Mon-2-10-3 — Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Phonetically-Aware Coupled Network For Short Duration Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Yun Lei|AUTHOR Yun Lei]], [[Hongbin Suo|AUTHOR Hongbin Suo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1420.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-4|PAPER Mon-2-10-4 — Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Network for Noise-Robust Keyword Spotting and Speaker Verification Using CTC-Based Soft VAD and Global Query Attention</div><div class="cpsessionviewpaperauthor">[[Myunghun Jung|AUTHOR Myunghun Jung]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Jahyun Goo|AUTHOR Jahyun Goo]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1422.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-5|PAPER Mon-2-10-5 — Vector-Based Attentive Pooling for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Vector-Based Attentive Pooling for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Yanfeng Wu|AUTHOR Yanfeng Wu]], [[Chenkai Guo|AUTHOR Chenkai Guo]], [[Hongcan Gao|AUTHOR Hongcan Gao]], [[Xiaolei Hou|AUTHOR Xiaolei Hou]], [[Jing Xu|AUTHOR Jing Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1446.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-6|PAPER Mon-2-10-6 — Self-Attention Encoding and Pooling for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Self-Attention Encoding and Pooling for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Pooyan Safari|AUTHOR Pooyan Safari]], [[Miquel India|AUTHOR Miquel India]], [[Javier Hernando|AUTHOR Javier Hernando]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1626.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-7|PAPER Mon-2-10-7 — ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">ARET: Aggregated Residual Extended Time-Delay Neural Networks for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Ruiteng Zhang|AUTHOR Ruiteng Zhang]], [[Jianguo Wei|AUTHOR Jianguo Wei]], [[Wenhuan Lu|AUTHOR Wenhuan Lu]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Meng Liu|AUTHOR Meng Liu]], [[Lin Zhang|AUTHOR Lin Zhang]], [[Jiayu Jin|AUTHOR Jiayu Jin]], [[Junhai Xu|AUTHOR Junhai Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1966.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-8|PAPER Mon-2-10-8 — Adversarial Separation Network for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Adversarial Separation Network for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Hanyi Zhang|AUTHOR Hanyi Zhang]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Yunchun Zhang|AUTHOR Yunchun Zhang]], [[Meng Liu|AUTHOR Meng Liu]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Jianguo Wei|AUTHOR Jianguo Wei]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-9|PAPER Mon-2-10-9 — Text-Independent Speaker Verification with Dual Attention Network]]</div>|<div class="cpsessionviewpapertitle">Text-Independent Speaker Verification with Dual Attention Network</div><div class="cpsessionviewpaperauthor">[[Jingyu Li|AUTHOR Jingyu Li]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-10-10|PAPER Mon-2-10-10 — Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Evolutionary Algorithm Enhanced Neural Architecture Search for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Xiaoyang Qu|AUTHOR Xiaoyang Qu]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Michael Seltzer|
|^&nbsp;|^Dan Povey|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-1|PAPER Mon-2-11-1 — Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Minimum Bayes Risk Training of RNN-Transducer for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Chao Weng|AUTHOR Chao Weng]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Jia Cui|AUTHOR Jia Cui]], [[Chunlei Zhang|AUTHOR Chunlei Zhang]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-2|PAPER Mon-2-11-2 — Semantic Mask for Transformer Based End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Semantic Mask for Transformer Based End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Chengyi Wang|AUTHOR Chengyi Wang]], [[Yu Wu|AUTHOR Yu Wu]], [[Yujiao Du|AUTHOR Yujiao Du]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Shujie Liu|AUTHOR Shujie Liu]], [[Liang Lu|AUTHOR Liang Lu]], [[Shuo Ren|AUTHOR Shuo Ren]], [[Guoli Ye|AUTHOR Guoli Ye]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Ming Zhou|AUTHOR Ming Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-3|PAPER Mon-2-11-3 — Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces]]</div>|<div class="cpsessionviewpapertitle">Faster, Simpler and More Accurate Hybrid ASR Systems Using Wordpieces</div><div class="cpsessionviewpaperauthor">[[Frank Zhang|AUTHOR Frank Zhang]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Xiaohui Zhang|AUTHOR Xiaohui Zhang]], [[Chunxi Liu|AUTHOR Chunxi Liu]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-4|PAPER Mon-2-11-4 — A Federated Approach in Training Acoustic Models]]</div>|<div class="cpsessionviewpapertitle">A Federated Approach in Training Acoustic Models</div><div class="cpsessionviewpaperauthor">[[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Kenichi Kumatani|AUTHOR Kenichi Kumatani]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2242.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-5|PAPER Mon-2-11-5 — On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data]]</div>|<div class="cpsessionviewpapertitle">On Semi-Supervised LF-MMI Training of Acoustic Models with Limited Data</div><div class="cpsessionviewpaperauthor">[[Imran Sheikh|AUTHOR Imran Sheikh]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Irina Illina|AUTHOR Irina Illina]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-6|PAPER Mon-2-11-6 — On Front-End Gain Invariant Modeling for Wake Word Spotting]]</div>|<div class="cpsessionviewpapertitle">On Front-End Gain Invariant Modeling for Wake Word Spotting</div><div class="cpsessionviewpaperauthor">[[Yixin Gao|AUTHOR Yixin Gao]], [[Noah D. Stein|AUTHOR Noah D. Stein]], [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Yunliang Cai|AUTHOR Yunliang Cai]], [[Ming Sun|AUTHOR Ming Sun]], [[Tao Zhang|AUTHOR Tao Zhang]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-7|PAPER Mon-2-11-7 — Unsupervised Regularization-Based Adaptive Training for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Regularization-Based Adaptive Training for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Fenglin Ding|AUTHOR Fenglin Ding]], [[Wu Guo|AUTHOR Wu Guo]], [[Bin Gu|AUTHOR Bin Gu]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0017.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-8|PAPER Mon-2-11-8 — On the Robustness and Training Dynamics of Raw Waveform Models]]</div>|<div class="cpsessionviewpapertitle">On the Robustness and Training Dynamics of Raw Waveform Models</div><div class="cpsessionviewpaperauthor">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1800.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-11-9|PAPER Mon-2-11-9 — Iterative Pseudo-Labeling for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Iterative Pseudo-Labeling for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Qiantong Xu|AUTHOR Qiantong Xu]], [[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Jacob Kahn|AUTHOR Jacob Kahn]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 12|<|
|^Chair:&nbsp;|^Zhijian Ou|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4002.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-1|PAPER Mon-2-12-1 — Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation]]</div>|<div class="cpsessionviewpapertitle">Smart Tube: A Biofeedback System for Vocal Training and Therapy Through Tube Phonation</div><div class="cpsessionviewpaperauthor">[[Naoko Kawamura|AUTHOR Naoko Kawamura]], [[Tatsuya Kitamura|AUTHOR Tatsuya Kitamura]], [[Kenta Hamada|AUTHOR Kenta Hamada]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-2|PAPER Mon-2-12-2 — VCTUBE : A Library for Automatic Speech Data Annotation]]</div>|<div class="cpsessionviewpapertitle">VCTUBE : A Library for Automatic Speech Data Annotation</div><div class="cpsessionviewpaperauthor">[[Seong Choi|AUTHOR Seong Choi]], [[Seunghoon Jeong|AUTHOR Seunghoon Jeong]], [[Jeewoo Yoon|AUTHOR Jeewoo Yoon]], [[Migyeong Yang|AUTHOR Migyeong Yang]], [[Minsam Ko|AUTHOR Minsam Ko]], [[Eunil Park|AUTHOR Eunil Park]], [[Jinyoung Han|AUTHOR Jinyoung Han]], [[Munyoung Lee|AUTHOR Munyoung Lee]], [[Seonghee Lee|AUTHOR Seonghee Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-3|PAPER Mon-2-12-3 — A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback]]</div>|<div class="cpsessionviewpapertitle">A Mandarin L2 Learning APP with Mispronunciation Detection and Feedback</div><div class="cpsessionviewpaperauthor">[[Yanlu Xie|AUTHOR Yanlu Xie]], [[Xiaoli Feng|AUTHOR Xiaoli Feng]], [[Boxue Li|AUTHOR Boxue Li]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]], [[Yujia Jin|AUTHOR Yujia Jin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4008.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-4|PAPER Mon-2-12-4 — Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains]]</div>|<div class="cpsessionviewpapertitle">Rapid Enhancement of NLP Systems by Acquisition of Data in Correlated Domains</div><div class="cpsessionviewpaperauthor">[[Tejas Udayakumar|AUTHOR Tejas Udayakumar]], [[Kinnera Saranu|AUTHOR Kinnera Saranu]], [[Mayuresh Sanjay Oak|AUTHOR Mayuresh Sanjay Oak]], [[Ajit Ashok Saunshikar|AUTHOR Ajit Ashok Saunshikar]], [[Sandip Shriram Bapat|AUTHOR Sandip Shriram Bapat]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-5|PAPER Mon-2-12-5 — Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil]]</div>|<div class="cpsessionviewpapertitle">Computer-Assisted Language Learning System: Automatic Speech Evaluation for Children Learning Malay and Tamil</div><div class="cpsessionviewpaperauthor">[[Ke Shi|AUTHOR Ke Shi]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Richeng Duan|AUTHOR Richeng Duan]], [[Siti Umairah Md. Salleh|AUTHOR Siti Umairah Md. Salleh]], [[Nur Farah Ain Suhaimi|AUTHOR Nur Farah Ain Suhaimi]], [[Rajan Vellu|AUTHOR Rajan Vellu]], [[Ngoc Thuy Huong Helen Thai|AUTHOR Ngoc Thuy Huong Helen Thai]], [[Nancy F. Chen|AUTHOR Nancy F. Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-6|PAPER Mon-2-12-6 — Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU]]</div>|<div class="cpsessionviewpapertitle">Real-Time, Full-Band, Online DNN-Based Voice Conversion System Using a Single CPU</div><div class="cpsessionviewpaperauthor">[[Takaaki Saeki|AUTHOR Takaaki Saeki]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4012.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-7|PAPER Mon-2-12-7 — A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy]]</div>|<div class="cpsessionviewpapertitle">A Dynamic 3D Pronunciation Teaching Model Based on Pronunciation Attributes and Anatomy</div><div class="cpsessionviewpaperauthor">[[Xiaoli Feng|AUTHOR Xiaoli Feng]], [[Yanlu Xie|AUTHOR Yanlu Xie]], [[Yayue Deng|AUTHOR Yayue Deng]], [[Boxue Li|AUTHOR Boxue Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/4015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-12-8|PAPER Mon-2-12-8 — End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge]]</div>|<div class="cpsessionviewpapertitle">End-to-End Deep Learning Speech Recognition Model for Silent Speech Challenge</div><div class="cpsessionviewpaperauthor">[[Naoki Kimura|AUTHOR Naoki Kimura]], [[Zixiong Su|AUTHOR Zixiong Su]], [[Takaaki Saeki|AUTHOR Takaaki Saeki]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Chiori Hori|
|^&nbsp;|^Yu Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-1|PAPER Mon-2-2-1 — Fast and Slow Acoustic Model]]</div>|<div class="cpsessionviewpapertitle">Fast and Slow Acoustic Model</div><div class="cpsessionviewpaperauthor">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Emilian Stoimenov|AUTHOR Emilian Stoimenov]], [[Hosam Khalil|AUTHOR Hosam Khalil]], [[Jian Wu|AUTHOR Jian Wu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-2|PAPER Mon-2-2-2 — Self-Distillation for Improving CTC-Transformer-Based ASR Systems]]</div>|<div class="cpsessionviewpapertitle">Self-Distillation for Improving CTC-Transformer-Based ASR Systems</div><div class="cpsessionviewpaperauthor">[[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Shigeki Karita|AUTHOR Shigeki Karita]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Takanori Ashihara|AUTHOR Takanori Ashihara]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Marc Delcroix|AUTHOR Marc Delcroix]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-3|PAPER Mon-2-2-3 — Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard]]</div>|<div class="cpsessionviewpapertitle">Single Headed Attention Based Sequence-to-Sequence Model for State-of-the-Art Results on Switchboard</div><div class="cpsessionviewpaperauthor">[[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[George Saon|AUTHOR George Saon]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-4|PAPER Mon-2-2-4 — Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection]]</div>|<div class="cpsessionviewpapertitle">Improving Speech Recognition Using GAN-Based Speech Synthesis and Contrastive Unspoken Text Selection</div><div class="cpsessionviewpaperauthor">[[Zhehuai Chen|AUTHOR Zhehuai Chen]], [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Gary Wang|AUTHOR Gary Wang]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-5|PAPER Mon-2-2-5 — PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR]]</div>|<div class="cpsessionviewpapertitle">PYCHAIN: A Fully Parallelized PyTorch Implementation of LF-MMI for End-to-End ASR</div><div class="cpsessionviewpaperauthor">[[Yiwen Shao|AUTHOR Yiwen Shao]], [[Yiming Wang|AUTHOR Yiming Wang]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2732.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-6|PAPER Mon-2-2-6 — CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency]]</div>|<div class="cpsessionviewpapertitle">CAT: A CTC-CRF Based ASR Toolkit Bridging the Hybrid and the End-to-End Approaches Towards Data Efficiency and Low Latency</div><div class="cpsessionviewpaperauthor">[[Keyu An|AUTHOR Keyu An]], [[Hongyu Xiang|AUTHOR Hongyu Xiang]], [[Zhijian Ou|AUTHOR Zhijian Ou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-7|PAPER Mon-2-2-7 — CTC-Synchronous Training for Monotonic Attention Model]]</div>|<div class="cpsessionviewpapertitle">CTC-Synchronous Training for Monotonic Attention Model</div><div class="cpsessionviewpaperauthor">[[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1797.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-8|PAPER Mon-2-2-8 — Continual Learning for Multi-Dialect Acoustic Models]]</div>|<div class="cpsessionviewpapertitle">Continual Learning for Multi-Dialect Acoustic Models</div><div class="cpsessionviewpaperauthor">[[Brady Houston|AUTHOR Brady Houston]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2275.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-2-9|PAPER Mon-2-2-9 — SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">SpecSwap: A Simple Data Augmentation Method for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xingchen Song|AUTHOR Xingchen Song]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Yiheng Huang|AUTHOR Yiheng Huang]], [[Dan Su|AUTHOR Dan Su]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Petra Wagner|
|^&nbsp;|^Steve Renals|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1184.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-1|PAPER Mon-2-3-1 — RECOApy: Data Recording, Pre-Processing and Phonetic Transcription for End-to-End Speech-Based Applications]]</div>|<div class="cpsessionviewpapertitle">RECOApy: Data Recording, Pre-Processing and Phonetic Transcription for End-to-End Speech-Based Applications</div><div class="cpsessionviewpaperauthor">[[Adriana Stan|AUTHOR Adriana Stan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-2|PAPER Mon-2-3-2 — Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer]]</div>|<div class="cpsessionviewpapertitle">Analyzing the Quality and Stability of a Streaming End-to-End On-Device Speech Recognizer</div><div class="cpsessionviewpaperauthor">[[Yuan Shangguan|AUTHOR Yuan Shangguan]], [[Kate Knister|AUTHOR Kate Knister]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Ian McGraw|AUTHOR Ian McGraw]], [[Françoise Beaufays|AUTHOR Françoise Beaufays]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1338.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-3|PAPER Mon-2-3-3 — Statistical Testing on ASR Performance via Blockwise Bootstrap]]</div>|<div class="cpsessionviewpapertitle">Statistical Testing on ASR Performance via Blockwise Bootstrap</div><div class="cpsessionviewpaperauthor">[[Zhe Liu|AUTHOR Zhe Liu]], [[Fuchun Peng|AUTHOR Fuchun Peng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1841.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-4|PAPER Mon-2-3-4 — Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations]]</div>|<div class="cpsessionviewpapertitle">Sentence Level Estimation of Psycholinguistic Norms Using Joint Multidimensional Annotations</div><div class="cpsessionviewpaperauthor">[[Anil Ramakrishna|AUTHOR Anil Ramakrishna]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-5|PAPER Mon-2-3-5 — Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System]]</div>|<div class="cpsessionviewpapertitle">Neural Zero-Inflated Quality Estimation Model for Automatic Speech Recognition System</div><div class="cpsessionviewpaperauthor">[[Kai Fan|AUTHOR Kai Fan]], [[Bo Li|AUTHOR Bo Li]], [[Jiayi Wang|AUTHOR Jiayi Wang]], [[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Boxing Chen|AUTHOR Boxing Chen]], [[Niyu Ge|AUTHOR Niyu Ge]], [[Zhijie Yan|AUTHOR Zhijie Yan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2215.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-6|PAPER Mon-2-3-6 — Confidence Measures in Encoder-Decoder Models for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Confidence Measures in Encoder-Decoder Models for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Alejandro Woodward|AUTHOR Alejandro Woodward]], [[Clara Bonnín|AUTHOR Clara Bonnín]], [[Issey Masuda|AUTHOR Issey Masuda]], [[David Varas|AUTHOR David Varas]], [[Elisenda Bou-Balust|AUTHOR Elisenda Bou-Balust]], [[Juan Carlos Riveiro|AUTHOR Juan Carlos Riveiro]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2357.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-7|PAPER Mon-2-3-7 — Word Error Rate Estimation Without ASR Output: e-WER2]]</div>|<div class="cpsessionviewpapertitle">Word Error Rate Estimation Without ASR Output: e-WER2</div><div class="cpsessionviewpaperauthor">[[Ahmed Ali|AUTHOR Ahmed Ali]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2521.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-8|PAPER Mon-2-3-8 — An Evaluation of Manual and Semi-Automatic Laughter Annotation]]</div>|<div class="cpsessionviewpapertitle">An Evaluation of Manual and Semi-Automatic Laughter Annotation</div><div class="cpsessionviewpaperauthor">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]], [[Petra Wagner|AUTHOR Petra Wagner]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2893.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-3-9|PAPER Mon-2-3-9 — Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”]]</div>|<div class="cpsessionviewpapertitle">Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual “be”</div><div class="cpsessionviewpaperauthor">[[Joshua L. Martin|AUTHOR Joshua L. Martin]], [[Kevin Tang|AUTHOR Kevin Tang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Philippe Martin|
|^&nbsp;|^Zhiqiang Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-1|PAPER Mon-2-4-1 — Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English]]</div>|<div class="cpsessionviewpapertitle">Secondary Phonetic Cues in the Production of the Nasal Short-a System in California English</div><div class="cpsessionviewpaperauthor">[[Georgia Zellou|AUTHOR Georgia Zellou]], [[Rebecca Scarborough|AUTHOR Rebecca Scarborough]], [[Renee Kemp|AUTHOR Renee Kemp]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-2|PAPER Mon-2-4-2 — Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination]]</div>|<div class="cpsessionviewpapertitle">Acoustic Properties of Strident Fricatives at the Edges: Implications for Consonant Discrimination</div><div class="cpsessionviewpaperauthor">[[Louis-Marie Lorin|AUTHOR Louis-Marie Lorin]], [[Lorenzo Maselli|AUTHOR Lorenzo Maselli]], [[Léo Varnet|AUTHOR Léo Varnet]], [[Maria Giavazzi|AUTHOR Maria Giavazzi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-3|PAPER Mon-2-4-3 — Processes and Consequences of Co-Articulation  in Mandarin V,,1,,N.(C,,2,,)V,,2,, Context: Phonology and Phonetics]]</div>|<div class="cpsessionviewpapertitle">Processes and Consequences of Co-Articulation  in Mandarin V,,1,,N.(C,,2,,)V,,2,, Context: Phonology and Phonetics</div><div class="cpsessionviewpaperauthor">[[Mingqiong Luo|AUTHOR Mingqiong Luo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-4|PAPER Mon-2-4-4 — Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect]]</div>|<div class="cpsessionviewpapertitle">Voicing Distinction of Obstruents in the Hangzhou Wu Chinese Dialect</div><div class="cpsessionviewpaperauthor">[[Yang Yue|AUTHOR Yang Yue]], [[Fang Hu|AUTHOR Fang Hu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2375.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-5|PAPER Mon-2-4-5 — The Phonology and Phonetics of Kaifeng Mandarin Vowels]]</div>|<div class="cpsessionviewpapertitle">The Phonology and Phonetics of Kaifeng Mandarin Vowels</div><div class="cpsessionviewpaperauthor">[[Lei Wang|AUTHOR Lei Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2353.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-6|PAPER Mon-2-4-6 — Microprosodic Variability in Plosives in German and Austrian German]]</div>|<div class="cpsessionviewpapertitle">Microprosodic Variability in Plosives in German and Austrian German</div><div class="cpsessionviewpaperauthor">[[Margaret Zellers|AUTHOR Margaret Zellers]], [[Barbara Schuppler|AUTHOR Barbara Schuppler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2453.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-7|PAPER Mon-2-4-7 — //Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study]]</div>|<div class="cpsessionviewpapertitle">//Er//-Suffixation in Southwestern Mandarin: An EMA and Ultrasound Study</div><div class="cpsessionviewpaperauthor">[[Jing Huang|AUTHOR Jing Huang]], [[Feng-fan Hsieh|AUTHOR Feng-fan Hsieh]], [[Yueh-chin Chang|AUTHOR Yueh-chin Chang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2350.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-8|PAPER Mon-2-4-8 — Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean]]</div>|<div class="cpsessionviewpapertitle">Electroglottographic-Phonetic Study on Korean Phonation Induced by Tripartite Plosives in Yanbian Korean</div><div class="cpsessionviewpaperauthor">[[Yinghao Li|AUTHOR Yinghao Li]], [[Jinghua Zhang|AUTHOR Jinghua Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-4-9|PAPER Mon-2-4-9 — Modeling Global Body Configurations in American Sign Language]]</div>|<div class="cpsessionviewpapertitle">Modeling Global Body Configurations in American Sign Language</div><div class="cpsessionviewpaperauthor">[[Nicholas Wilkins|AUTHOR Nicholas Wilkins]], [[Max Cordes Galbraith|AUTHOR Max Cordes Galbraith]], [[Ifeoma Nwogu|AUTHOR Ifeoma Nwogu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Ganna Raboshchuk|
|^&nbsp;|^Sheng Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-1|PAPER Mon-2-5-1 — Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation]]</div>|<div class="cpsessionviewpapertitle">Augmenting Turn-Taking Prediction with Wearable Eye Activity During Conversation</div><div class="cpsessionviewpaperauthor">[[Hang Li|AUTHOR Hang Li]], [[Siyuan Chen|AUTHOR Siyuan Chen]], [[Julien Epps|AUTHOR Julien Epps]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1192.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-2|PAPER Mon-2-5-2 — CAM: Uninteresting Speech Detector]]</div>|<div class="cpsessionviewpapertitle">CAM: Uninteresting Speech Detector</div><div class="cpsessionviewpaperauthor">[[Weiyi Lu|AUTHOR Weiyi Lu]], [[Yi Xu|AUTHOR Yi Xu]], [[Peng Yang|AUTHOR Peng Yang]], [[Belinda Zeng|AUTHOR Belinda Zeng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-3|PAPER Mon-2-5-3 — Mixed Case Contextual ASR Using Capitalization Masks]]</div>|<div class="cpsessionviewpapertitle">Mixed Case Contextual ASR Using Capitalization Masks</div><div class="cpsessionviewpaperauthor">[[Diamantino Caseiro|AUTHOR Diamantino Caseiro]], [[Pat Rondon|AUTHOR Pat Rondon]], [[Quoc-Nam Le The|AUTHOR Quoc-Nam Le The]], [[Petar Aleksic|AUTHOR Petar Aleksic]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-4|PAPER Mon-2-5-4 — Speech Recognition and Multi-Speaker Diarization of Long Conversations]]</div>|<div class="cpsessionviewpapertitle">Speech Recognition and Multi-Speaker Diarization of Long Conversations</div><div class="cpsessionviewpaperauthor">[[Huanru Henry Mao|AUTHOR Huanru Henry Mao]], [[Shuyang Li|AUTHOR Shuyang Li]], [[Julian McAuley|AUTHOR Julian McAuley]], [[Garrison W. Cottrell|AUTHOR Garrison W. Cottrell]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1161.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-5|PAPER Mon-2-5-5 — Investigation of Data Augmentation Techniques for Disordered Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Investigation of Data Augmentation Techniques for Disordered Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Mengzhe Geng|AUTHOR Mengzhe Geng]], [[Xurong Xie|AUTHOR Xurong Xie]], [[Shansong Liu|AUTHOR Shansong Liu]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2105.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-6|PAPER Mon-2-5-6 — A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection]]</div>|<div class="cpsessionviewpapertitle">A Real-Time Robot-Based Auxiliary System for Risk Evaluation of COVID-19 Infection</div><div class="cpsessionviewpaperauthor">[[Wenqi Wei|AUTHOR Wenqi Wei]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Jiteng Ma|AUTHOR Jiteng Ma]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2265.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-7|PAPER Mon-2-5-7 — An Utterance Verification System for Word Naming Therapy in Aphasia]]</div>|<div class="cpsessionviewpapertitle">An Utterance Verification System for Word Naming Therapy in Aphasia</div><div class="cpsessionviewpaperauthor">[[David S. Barbera|AUTHOR David S. Barbera]], [[Mark Huckvale|AUTHOR Mark Huckvale]], [[Victoria Fleming|AUTHOR Victoria Fleming]], [[Emily Upton|AUTHOR Emily Upton]], [[Henry Coley-Fisher|AUTHOR Henry Coley-Fisher]], [[Ian Shaw|AUTHOR Ian Shaw]], [[William Latham|AUTHOR William Latham]], [[Alexander P. Leff|AUTHOR Alexander P. Leff]], [[Jenny Crinion|AUTHOR Jenny Crinion]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-8|PAPER Mon-2-5-8 — Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploiting Cross-Domain Visual Feature Generation for Disordered Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shansong Liu|AUTHOR Shansong Liu]], [[Xurong Xie|AUTHOR Xurong Xie]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Mengzhe Geng|AUTHOR Mengzhe Geng]], [[Rongfeng Su|AUTHOR Rongfeng Su]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1277.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-9|PAPER Mon-2-5-9 — Joint Prediction of Punctuation and Disfluency in Speech Transcripts]]</div>|<div class="cpsessionviewpapertitle">Joint Prediction of Punctuation and Disfluency in Speech Transcripts</div><div class="cpsessionviewpaperauthor">[[Binghuai Lin|AUTHOR Binghuai Lin]], [[Liyuan Wang|AUTHOR Liyuan Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-5-10|PAPER Mon-2-5-10 — Focal Loss for Punctuation Prediction]]</div>|<div class="cpsessionviewpapertitle">Focal Loss for Punctuation Prediction</div><div class="cpsessionviewpaperauthor">[[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Ye Bai|AUTHOR Ye Bai]], [[Cunhang Fan|AUTHOR Cunhang Fan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Heiga Zen|
|^&nbsp;|^Zhenhua Ling|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-1|PAPER Mon-2-7-1 — Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning]]</div>|<div class="cpsessionviewpapertitle">Recognition-Synthesis Based Non-Parallel Voice Conversion with Adversarial Learning</div><div class="cpsessionviewpaperauthor">[[Jing-Xuan Zhang|AUTHOR Jing-Xuan Zhang]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-2|PAPER Mon-2-7-2 — Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Improving the Speaker Identity of Non-Parallel Many-to-Many Voice Conversion with Adversarial Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Shaojin Ding|AUTHOR Shaojin Ding]], [[Guanlong Zhao|AUTHOR Guanlong Zhao]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1310.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-3|PAPER Mon-2-7-3 — Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN]]</div>|<div class="cpsessionviewpapertitle">Non-Parallel Many-to-Many Voice Conversion with PSR-StarGAN</div><div class="cpsessionviewpaperauthor">[[Yanping Li|AUTHOR Yanping Li]], [[Dongxiang Xu|AUTHOR Dongxiang Xu]], [[Yan Zhang|AUTHOR Yan Zhang]], [[Yang Wang|AUTHOR Yang Wang]], [[Binbin Chen|AUTHOR Binbin Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1416.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-4|PAPER Mon-2-7-4 — TTS Skins: Speaker Conversion via ASR]]</div>|<div class="cpsessionviewpapertitle">TTS Skins: Speaker Conversion via ASR</div><div class="cpsessionviewpaperauthor">[[Adam Polyak|AUTHOR Adam Polyak]], [[Lior Wolf|AUTHOR Lior Wolf]], [[Yaniv Taigman|AUTHOR Yaniv Taigman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1710.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-5|PAPER Mon-2-7-5 — GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus]]</div>|<div class="cpsessionviewpapertitle">GAZEV: GAN-Based Zero-Shot Voice Conversion Over Non-Parallel Speech Corpus</div><div class="cpsessionviewpaperauthor">[[Zining Zhang|AUTHOR Zining Zhang]], [[Bingsheng He|AUTHOR Bingsheng He]], [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-6|PAPER Mon-2-7-6 — Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation]]</div>|<div class="cpsessionviewpapertitle">Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation</div><div class="cpsessionviewpaperauthor">[[Tao Wang|AUTHOR Tao Wang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Rongxiu Zhong|AUTHOR Rongxiu Zhong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-7|PAPER Mon-2-7-7 — Unsupervised Cross-Domain Singing Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Cross-Domain Singing Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Adam Polyak|AUTHOR Adam Polyak]], [[Lior Wolf|AUTHOR Lior Wolf]], [[Yossi Adi|AUTHOR Yossi Adi]], [[Yaniv Taigman|AUTHOR Yaniv Taigman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2512.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-8|PAPER Mon-2-7-8 — Attention-Based Speaker Embeddings for One-Shot Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Attention-Based Speaker Embeddings for One-Shot Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Tatsuma Ishihara|AUTHOR Tatsuma Ishihara]], [[Daisuke Saito|AUTHOR Daisuke Saito]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2530.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-7-9|PAPER Mon-2-7-9 — Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training]]</div>|<div class="cpsessionviewpapertitle">Data Efficient Voice Cloning from Noisy Samples with Domain Adversarial Training</div><div class="cpsessionviewpaperauthor">[[Jian Cong|AUTHOR Jian Cong]], [[Shan Yang|AUTHOR Shan Yang]], [[Lei Xie|AUTHOR Lei Xie]], [[Guoqiao Yu|AUTHOR Guoqiao Yu]], [[Guanglu Wan|AUTHOR Guanglu Wan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Akinori Ito|
|^&nbsp;|^Kunio Kashino|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-1|PAPER Mon-2-8-1 — Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging]]</div>|<div class="cpsessionviewpapertitle">Gated Multi-Head Attention Pooling for Weakly Labelled Audio Tagging</div><div class="cpsessionviewpaperauthor">[[Sixin Hong|AUTHOR Sixin Hong]], [[Yuexian Zou|AUTHOR Yuexian Zou]], [[Wenwu Wang|AUTHOR Wenwu Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-2|PAPER Mon-2-8-2 — Environmental Sound Classification with Parallel Temporal-Spectral Attention]]</div>|<div class="cpsessionviewpapertitle">Environmental Sound Classification with Parallel Temporal-Spectral Attention</div><div class="cpsessionviewpaperauthor">[[Helin Wang|AUTHOR Helin Wang]], [[Yuexian Zou|AUTHOR Yuexian Zou]], [[Dading Chong|AUTHOR Dading Chong]], [[Wenwu Wang|AUTHOR Wenwu Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1891.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-3|PAPER Mon-2-8-3 — Contrastive Predictive Coding of Audio with an Adversary]]</div>|<div class="cpsessionviewpapertitle">Contrastive Predictive Coding of Audio with an Adversary</div><div class="cpsessionviewpaperauthor">[[Luyu Wang|AUTHOR Luyu Wang]], [[Kazuya Kawakami|AUTHOR Kazuya Kawakami]], [[Aaron van den Oord|AUTHOR Aaron van den Oord]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-4|PAPER Mon-2-8-4 — Memory Controlled Sequential Self Attention for Sound Recognition]]</div>|<div class="cpsessionviewpapertitle">Memory Controlled Sequential Self Attention for Sound Recognition</div><div class="cpsessionviewpaperauthor">[[Arjun Pankajakshan|AUTHOR Arjun Pankajakshan]], [[Helen L. Bear|AUTHOR Helen L. Bear]], [[Vinod Subramanian|AUTHOR Vinod Subramanian]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2152.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-5|PAPER Mon-2-8-5 — Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification]]</div>|<div class="cpsessionviewpapertitle">Dual Stage Learning Based Dynamic Time-Frequency Mask Generation for Audio Event Classification</div><div class="cpsessionviewpaperauthor">[[Donghyeon Kim|AUTHOR Donghyeon Kim]], [[Jaihyun Park|AUTHOR Jaihyun Park]], [[David K. Han|AUTHOR David K. Han]], [[Hanseok Ko|AUTHOR Hanseok Ko]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-6|PAPER Mon-2-8-6 — An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">An Effective Perturbation Based Semi-Supervised Learning Method for Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Xu Zheng|AUTHOR Xu Zheng]], [[Yan Song|AUTHOR Yan Song]], [[Jie Yan|AUTHOR Jie Yan]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Lin Liu|AUTHOR Lin Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2791.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-7|PAPER Mon-2-8-7 — A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling]]</div>|<div class="cpsessionviewpapertitle">A Joint Framework for Audio Tagging and Weakly Supervised Acoustic Event Detection Using DenseNet with Global Average Pooling</div><div class="cpsessionviewpaperauthor">[[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Bowen Shi|AUTHOR Bowen Shi]], [[Ming Sun|AUTHOR Ming Sun]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-8|PAPER Mon-2-8-8 — Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging]]</div>|<div class="cpsessionviewpapertitle">Intra-Utterance Similarity Preserving Knowledge Distillation for Audio Tagging</div><div class="cpsessionviewpaperauthor">[[Chun-Chieh Chang|AUTHOR Chun-Chieh Chang]], [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Ming Sun|AUTHOR Ming Sun]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3097.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-9|PAPER Mon-2-8-9 — Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification]]</div>|<div class="cpsessionviewpapertitle">Two-Stage Polyphonic Sound Event Detection Based on Faster R-CNN-LSTM with Multi-Token Connectionist Temporal Classification</div><div class="cpsessionviewpaperauthor">[[Inyoung Park|AUTHOR Inyoung Park]], [[Hong Kook Kim|AUTHOR Hong Kook Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3147.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-8-10|PAPER Mon-2-8-10 — SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations]]</div>|<div class="cpsessionviewpapertitle">SpeechMix — Augmenting Deep Sound Recognition Using Hidden Space Interpolations</div><div class="cpsessionviewpaperauthor">[[Amit Jindal|AUTHOR Amit Jindal]], [[Narayanan Elavathur Ranganatha|AUTHOR Narayanan Elavathur Ranganatha]], [[Aniket Didolkar|AUTHOR Aniket Didolkar]], [[Arijit Ghosh Chowdhury|AUTHOR Arijit Ghosh Chowdhury]], [[Di Jin|AUTHOR Di Jin]], [[Ramit Sawhney|AUTHOR Ramit Sawhney]], [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Yannick Estève|
|^&nbsp;|^Yuanzhe Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-1|PAPER Mon-2-9-1 — End-to-End Neural Transformer Based Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">End-to-End Neural Transformer Based Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Martin Radfar|AUTHOR Martin Radfar]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]], [[Siegfried Kunzmann|AUTHOR Siegfried Kunzmann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1632.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-2|PAPER Mon-2-9-2 — Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Jointly Encoding Word Confusion Network and Dialogue Context with BERT for Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Chen Liu|AUTHOR Chen Liu]], [[Su Zhu|AUTHOR Su Zhu]], [[Zijian Zhao|AUTHOR Zijian Zhao]], [[Ruisheng Cao|AUTHOR Ruisheng Cao]], [[Lu Chen|AUTHOR Lu Chen]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-3|PAPER Mon-2-9-3 — Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces]]</div>|<div class="cpsessionviewpapertitle">Speech to Semantics: Improve ASR and NLU Jointly via All-Neural Interfaces</div><div class="cpsessionviewpaperauthor">[[Milind Rao|AUTHOR Milind Rao]], [[Anirudh Raju|AUTHOR Anirudh Raju]], [[Pranav Dheram|AUTHOR Pranav Dheram]], [[Bach Bui|AUTHOR Bach Bui]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2456.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-4|PAPER Mon-2-9-4 — Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning]]</div>|<div class="cpsessionviewpapertitle">Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning</div><div class="cpsessionviewpaperauthor">[[Pavel Denisov|AUTHOR Pavel Denisov]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1813.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-5|PAPER Mon-2-9-5 — Context Dependent RNNLM for Automatic Transcription of Conversations]]</div>|<div class="cpsessionviewpapertitle">Context Dependent RNNLM for Automatic Transcription of Conversations</div><div class="cpsessionviewpaperauthor">[[Srikanth Raj Chetupalli|AUTHOR Srikanth Raj Chetupalli]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-6|PAPER Mon-2-9-6 — Improving End-to-End Speech-to-Intent Classification with Reptile]]</div>|<div class="cpsessionviewpapertitle">Improving End-to-End Speech-to-Intent Classification with Reptile</div><div class="cpsessionviewpaperauthor">[[Yusheng Tian|AUTHOR Yusheng Tian]], [[Philip John Gorinski|AUTHOR Philip John Gorinski]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1246.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-7|PAPER Mon-2-9-7 — Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation]]</div>|<div class="cpsessionviewpapertitle">Speech to Text Adaptation: Towards an Efficient Cross-Modal Distillation</div><div class="cpsessionviewpaperauthor">[[Won Ik Cho|AUTHOR Won Ik Cho]], [[Donghyun Kwak|AUTHOR Donghyun Kwak]], [[Ji Won Yoon|AUTHOR Ji Won Yoon]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-8|PAPER Mon-2-9-8 — Towards an ASR Error Robust Spoken Language Understanding System]]</div>|<div class="cpsessionviewpapertitle">Towards an ASR Error Robust Spoken Language Understanding System</div><div class="cpsessionviewpaperauthor">[[Weitong Ruan|AUTHOR Weitong Ruan]], [[Yaroslav Nechaev|AUTHOR Yaroslav Nechaev]], [[Luoxin Chen|AUTHOR Luoxin Chen]], [[Chengwei Su|AUTHOR Chengwei Su]], [[Imre Kiss|AUTHOR Imre Kiss]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2924.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-9|PAPER Mon-2-9-9 — End-to-End Spoken Language Understanding Without Full Transcripts]]</div>|<div class="cpsessionviewpapertitle">End-to-End Spoken Language Understanding Without Full Transcripts</div><div class="cpsessionviewpaperauthor">[[Hong-Kwang J. Kuo|AUTHOR Hong-Kwang J. Kuo]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Samuel Thomas|AUTHOR Samuel Thomas]], [[Yinghui Huang|AUTHOR Yinghui Huang]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[Gakuto Kurata|AUTHOR Gakuto Kurata]], [[Zvi Kons|AUTHOR Zvi Kons]], [[Ron Hoory|AUTHOR Ron Hoory]], [[Luis Lastras|AUTHOR Luis Lastras]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1508.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-2-9-10|PAPER Mon-2-9-10 — Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study]]</div>|<div class="cpsessionviewpapertitle">Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study</div><div class="cpsessionviewpaperauthor">[[Karthik Gopalakrishnan|AUTHOR Karthik Gopalakrishnan]], [[Behnam Hedayatnia|AUTHOR Behnam Hedayatnia]], [[Longshaokan Wang|AUTHOR Longshaokan Wang]], [[Yang Liu|AUTHOR Yang Liu]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Preethi Jyothi|
|^&nbsp;|^Sunayana Sitaram|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-1|PAPER Mon-3-1-1 — Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?]]</div>|<div class="cpsessionviewpapertitle">Autosegmental Neural Nets: Should Phones and Tones be Synchronous or Asynchronous?</div><div class="cpsessionviewpaperauthor">[[Jialu Li|AUTHOR Jialu Li]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2827.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-2|PAPER Mon-3-1-2 — Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages]]</div>|<div class="cpsessionviewpapertitle">Development of Multilingual ASR Using GlobalPhone for Less-Resourced Languages: The Case of Ethiopian Languages</div><div class="cpsessionviewpaperauthor">[[Martha Yifiru Tachbelie|AUTHOR Martha Yifiru Tachbelie]], [[Solomon Teferra Abate|AUTHOR Solomon Teferra Abate]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-3|PAPER Mon-3-1-3 — Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning]]</div>|<div class="cpsessionviewpapertitle">Large-Scale End-to-End Multilingual Speech Recognition and Language Identification with Multi-Task Learning</div><div class="cpsessionviewpaperauthor">[[Wenxin Hou|AUTHOR Wenxin Hou]], [[Yue Dong|AUTHOR Yue Dong]], [[Bairong Zhuang|AUTHOR Bairong Zhuang]], [[Longfei Yang|AUTHOR Longfei Yang]], [[Jiatong Shi|AUTHOR Jiatong Shi]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2488.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-4|PAPER Mon-3-1-4 — Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Encoder-Decoder Transformer for Code-Switching Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xinyuan Zhou|AUTHOR Xinyuan Zhou]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Yanhua Long|AUTHOR Yanhua Long]], [[Yijie Li|AUTHOR Yijie Li]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2856.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-5|PAPER Mon-3-1-5 — Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages]]</div>|<div class="cpsessionviewpapertitle">Multilingual Acoustic and Language Modeling for Ethio-Semitic Languages</div><div class="cpsessionviewpaperauthor">[[Solomon Teferra Abate|AUTHOR Solomon Teferra Abate]], [[Martha Yifiru Tachbelie|AUTHOR Martha Yifiru Tachbelie]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2828.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-6|PAPER Mon-3-1-6 — Multilingual Jointly Trained Acoustic and Written Word Embeddings]]</div>|<div class="cpsessionviewpapertitle">Multilingual Jointly Trained Acoustic and Written Word Embeddings</div><div class="cpsessionviewpaperauthor">[[Yushi Hu|AUTHOR Yushi Hu]], [[Shane Settle|AUTHOR Shane Settle]], [[Karen Livescu|AUTHOR Karen Livescu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-7|PAPER Mon-3-1-7 — Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Improving Code-Switching Language Modeling with Artificially Generated Texts Using Cycle-Consistent Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Chia-Yu Li|AUTHOR Chia-Yu Li]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2219.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-8|PAPER Mon-3-1-8 — Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods]]</div>|<div class="cpsessionviewpapertitle">Data Augmentation for Code-Switch Language Modeling by Fusing Multiple Text Generation Methods</div><div class="cpsessionviewpaperauthor">[[Xinhui Hu|AUTHOR Xinhui Hu]], [[Qi Zhang|AUTHOR Qi Zhang]], [[Lei Yang|AUTHOR Lei Yang]], [[Binbin Gu|AUTHOR Binbin Gu]], [[Xinkang Xu|AUTHOR Xinkang Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-9|PAPER Mon-3-1-9 — A 43 Language Multilingual Punctuation Prediction Neural Network Model]]</div>|<div class="cpsessionviewpapertitle">A 43 Language Multilingual Punctuation Prediction Neural Network Model</div><div class="cpsessionviewpaperauthor">[[Xinxing Li|AUTHOR Xinxing Li]], [[Edward Lin|AUTHOR Edward Lin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2440.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-1-10|PAPER Mon-3-1-10 — Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploring Lexicon-Free Modeling Units for End-to-End Korean and Korean-English Code-Switching Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jisung Wang|AUTHOR Jisung Wang]], [[Jihwan Kim|AUTHOR Jihwan Kim]], [[Sangki Kim|AUTHOR Sangki Kim]], [[Yeha Lee|AUTHOR Yeha Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Ewan Dunbar|
|^&nbsp;|^Takayuki Arai|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1159.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-1|PAPER Mon-3-10-1 — Two Different Mechanisms of Movable Mandible for Vocal-Tract Model with Flexible Tongue]]</div>|<div class="cpsessionviewpapertitle">Two Different Mechanisms of Movable Mandible for Vocal-Tract Model with Flexible Tongue</div><div class="cpsessionviewpaperauthor">[[Takayuki Arai|AUTHOR Takayuki Arai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1187.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-2|PAPER Mon-3-10-2 — Improving the Performance of Acoustic-to-Articulatory Inversion by Removing the Training Loss of Noncritical Portions of Articulatory Channels Dynamically]]</div>|<div class="cpsessionviewpapertitle">Improving the Performance of Acoustic-to-Articulatory Inversion by Removing the Training Loss of Noncritical Portions of Articulatory Channels Dynamically</div><div class="cpsessionviewpaperauthor">[[Qiang Fang|AUTHOR Qiang Fang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1222.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-3|PAPER Mon-3-10-3 — Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors]]</div>|<div class="cpsessionviewpapertitle">Speaker Conditioned Acoustic-to-Articulatory Inversion Using x-Vectors</div><div class="cpsessionviewpaperauthor">[[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1432.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-4|PAPER Mon-3-10-4 — Coarticulation as Synchronised Sequential Target Approximation: An EMA Study]]</div>|<div class="cpsessionviewpapertitle">Coarticulation as Synchronised Sequential Target Approximation: An EMA Study</div><div class="cpsessionviewpaperauthor">[[Zirui Liu|AUTHOR Zirui Liu]], [[Yi Xu|AUTHOR Yi Xu]], [[Feng-fan Hsieh|AUTHOR Feng-fan Hsieh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-5|PAPER Mon-3-10-5 — Improved Model for Vocal Folds with a Polyp with Potential Application]]</div>|<div class="cpsessionviewpapertitle">Improved Model for Vocal Folds with a Polyp with Potential Application</div><div class="cpsessionviewpaperauthor">[[J^onatas Santos|AUTHOR J^onatas Santos]], [[Jugurta Montalvão|AUTHOR Jugurta Montalvão]], [[Israel Santos|AUTHOR Israel Santos]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-6|PAPER Mon-3-10-6 — Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics]]</div>|<div class="cpsessionviewpapertitle">Regional Resonance of the Lower Vocal Tract and its Contribution to Speaker Characteristics</div><div class="cpsessionviewpaperauthor">[[Lin Zhang|AUTHOR Lin Zhang]], [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]], [[Jianguo Wei|AUTHOR Jianguo Wei]], [[Seiji Adachi|AUTHOR Seiji Adachi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-7|PAPER Mon-3-10-7 — Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network]]</div>|<div class="cpsessionviewpapertitle">Air-Tissue Boundary Segmentation in Real Time Magnetic Resonance Imaging Video Using 3-D Convolutional Neural Network</div><div class="cpsessionviewpaperauthor">[[Renuka Mannem|AUTHOR Renuka Mannem]], [[Navaneetha Gaddam|AUTHOR Navaneetha Gaddam]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2709.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-10-8|PAPER Mon-3-10-8 — An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates]]</div>|<div class="cpsessionviewpapertitle">An Investigation of the Virtual Lip Trajectories During the Production of Bilabial Stops and Nasal at Different Speaking Rates</div><div class="cpsessionviewpaperauthor">[[Tilak Purohit|AUTHOR Tilak Purohit]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Yu Tsao|
|^&nbsp;|^Jonathan Le Roux|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1397.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-1|PAPER Mon-3-11-1 — SpEx+: A Complete Time Domain Speaker Extraction Network]]</div>|<div class="cpsessionviewpapertitle">SpEx+: A Complete Time Domain Speaker Extraction Network</div><div class="cpsessionviewpaperauthor">[[Meng Ge|AUTHOR Meng Ge]], [[Chenglin Xu|AUTHOR Chenglin Xu]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1436.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-2|PAPER Mon-3-11-2 — Atss-Net: Target Speaker Separation via Attention-Based Neural Network]]</div>|<div class="cpsessionviewpapertitle">Atss-Net: Target Speaker Separation via Attention-Based Neural Network</div><div class="cpsessionviewpaperauthor">[[Tingle Li|AUTHOR Tingle Li]], [[Qingjian Lin|AUTHOR Qingjian Lin]], [[Yuanyuan Bao|AUTHOR Yuanyuan Bao]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-3|PAPER Mon-3-11-3 — Multimodal Target Speech Separation with Voice and Face References]]</div>|<div class="cpsessionviewpapertitle">Multimodal Target Speech Separation with Voice and Face References</div><div class="cpsessionviewpaperauthor">[[Leyuan Qu|AUTHOR Leyuan Qu]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1706.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-4|PAPER Mon-3-11-4 — X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network]]</div>|<div class="cpsessionviewpapertitle">X-TaSNet: Robust and Accurate Time-Domain Speaker Extraction Network</div><div class="cpsessionviewpaperauthor">[[Zining Zhang|AUTHOR Zining Zhang]], [[Bingsheng He|AUTHOR Bingsheng He]], [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2028.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-5|PAPER Mon-3-11-5 — Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Listen, Watch and Understand at the Cocktail Party: Audio-Visual-Contextual Speech Separation</div><div class="cpsessionviewpaperauthor">[[Chenda Li|AUTHOR Chenda Li]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-6|PAPER Mon-3-11-6 — A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments]]</div>|<div class="cpsessionviewpapertitle">A Unified Framework for Low-Latency Speaker Extraction in Cocktail Party Environments</div><div class="cpsessionviewpaperauthor">[[Yunzhe Hao|AUTHOR Yunzhe Hao]], [[Jiaming Xu|AUTHOR Jiaming Xu]], [[Jing Shi|AUTHOR Jing Shi]], [[Peng Zhang|AUTHOR Peng Zhang]], [[Lei Qin|AUTHOR Lei Qin]], [[Bo Xu|AUTHOR Bo Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-7|PAPER Mon-3-11-7 — Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding]]</div>|<div class="cpsessionviewpapertitle">Time-Domain Target-Speaker Speech Separation with Waveform-Based Speaker Embedding</div><div class="cpsessionviewpaperauthor">[[Jianshu Zhao|AUTHOR Jianshu Zhao]], [[Shengzhou Gao|AUTHOR Shengzhou Gao]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2210.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-8|PAPER Mon-3-11-8 — Listen to What You Want: Neural Network-Based Universal Sound Selector]]</div>|<div class="cpsessionviewpapertitle">Listen to What You Want: Neural Network-Based Universal Sound Selector</div><div class="cpsessionviewpaperauthor">[[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Hiroaki Ito|AUTHOR Hiroaki Ito]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Shoko Araki|AUTHOR Shoko Araki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2445.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-9|PAPER Mon-3-11-9 — Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels]]</div>|<div class="cpsessionviewpapertitle">Crossmodal Sound Retrieval Based on Specific Target Co-Occurrence Denoted with Weak Labels</div><div class="cpsessionviewpaperauthor">[[Masahiro Yasuda|AUTHOR Masahiro Yasuda]], [[Yasunori Ohishi|AUTHOR Yasunori Ohishi]], [[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Noboru Harada|AUTHOR Noboru Harada]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2483.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-11-10|PAPER Mon-3-11-10 — Speaker-Aware Monaural Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Speaker-Aware Monaural Speech Separation</div><div class="cpsessionviewpaperauthor">[[Jiahao Xu|AUTHOR Jiahao Xu]], [[Kun Hu|AUTHOR Kun Hu]], [[Chang Xu|AUTHOR Chang Xu]], [[Duc Chung Tran|AUTHOR Duc Chung Tran]], [[Zhiyong Wang|AUTHOR Zhiyong Wang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Massimiliano Todisco|
|^&nbsp;|^Masashi Unoki|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-1|PAPER Mon-3-2-1 — Multi-Task Siamese Neural Network for Improving Replay Attack Detection]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Siamese Neural Network for Improving Replay Attack Detection</div><div class="cpsessionviewpaperauthor">[[Patrick von Platen|AUTHOR Patrick von Platen]], [[Fei Tao|AUTHOR Fei Tao]], [[Gokhan Tur|AUTHOR Gokhan Tur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-2|PAPER Mon-3-2-2 — POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise]]</div>|<div class="cpsessionviewpapertitle">POCO: A Voice Spoofing and Liveness Detection Corpus Based on Pop Noise</div><div class="cpsessionviewpaperauthor">[[Kosuke Akimoto|AUTHOR Kosuke Akimoto]], [[Seng Pei Liew|AUTHOR Seng Pei Liew]], [[Sakiko Mishima|AUTHOR Sakiko Mishima]], [[Ryo Mizushima|AUTHOR Ryo Mizushima]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1255.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-3|PAPER Mon-3-2-3 — Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection]]</div>|<div class="cpsessionviewpapertitle">Dual-Adversarial Domain Adaptation for Generalized Replay Attack Detection</div><div class="cpsessionviewpaperauthor">[[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-4|PAPER Mon-3-2-4 — Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Pre-Training with Acoustic Configurations for Replay Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-5|PAPER Mon-3-2-5 — Competency Evaluation in Voice Mimicking Using Acoustic Cues]]</div>|<div class="cpsessionviewpapertitle">Competency Evaluation in Voice Mimicking Using Acoustic Cues</div><div class="cpsessionviewpaperauthor">[[Abhijith G.|AUTHOR Abhijith G.]], [[Adharsh S.|AUTHOR Adharsh S.]], [[Akshay P. L.|AUTHOR Akshay P. L.]], [[Rajeev Rajan|AUTHOR Rajeev Rajan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1810.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-6|PAPER Mon-3-2-6 — Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks]]</div>|<div class="cpsessionviewpapertitle">Light Convolutional Neural Network with Feature Genuinization for Detection of Synthetic Speech Attacks</div><div class="cpsessionviewpaperauthor">[[Zhenzong Wu|AUTHOR Zhenzong Wu]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1844.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-7|PAPER Mon-3-2-7 — Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers]]</div>|<div class="cpsessionviewpapertitle">Spoofing Attack Detection Using the Non-Linear Fusion of Sub-Band Classifiers</div><div class="cpsessionviewpaperauthor">[[Hemlata Tak|AUTHOR Hemlata Tak]], [[Jose Patino|AUTHOR Jose Patino]], [[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Nicholas Evans|AUTHOR Nicholas Evans]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2039.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-8|PAPER Mon-3-2-8 — Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions]]</div>|<div class="cpsessionviewpapertitle">Investigating Light-ResNet Architecture for Spoofing Detection Under Mismatched Conditions</div><div class="cpsessionviewpaperauthor">[[Prasanth Parasu|AUTHOR Prasanth Parasu]], [[Julien Epps|AUTHOR Julien Epps]], [[Kaavya Sriskandaraja|AUTHOR Kaavya Sriskandaraja]], [[Gajan Suthokumar|AUTHOR Gajan Suthokumar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-2-9|PAPER Mon-3-2-9 — Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection]]</div>|<div class="cpsessionviewpapertitle">Siamese Convolutional Neural Network Using Gaussian Probability Feature for Spoofing Speech Detection</div><div class="cpsessionviewpaperauthor">[[Zhenchun Lei|AUTHOR Zhenchun Lei]], [[Yingen Yang|AUTHOR Yingen Yang]], [[Changhong Liu|AUTHOR Changhong Liu]], [[Jihua Ye|AUTHOR Jihua Ye]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Fei Chen|
|^&nbsp;|^Henning Schepker|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-1|PAPER Mon-3-3-1 — Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Lightweight Online Noise Reduction on Embedded Devices Using Hierarchical Recurrent Neural Networks</div><div class="cpsessionviewpaperauthor">[[H. Schröter|AUTHOR H. Schröter]], [[T. Rosenkranz|AUTHOR T. Rosenkranz]], [[A.N. Escalante-B.|AUTHOR A.N. Escalante-B.]], [[P. Zobel|AUTHOR P. Zobel]], [[Andreas Maier|AUTHOR Andreas Maier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1563.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-2|PAPER Mon-3-3-2 — SEANet: A Multi-Modal Speech Enhancement Network]]</div>|<div class="cpsessionviewpapertitle">SEANet: A Multi-Modal Speech Enhancement Network</div><div class="cpsessionviewpaperauthor">[[Marco Tagliasacchi|AUTHOR Marco Tagliasacchi]], [[Yunpeng Li|AUTHOR Yunpeng Li]], [[Karolis Misiunas|AUTHOR Karolis Misiunas]], [[Dominik Roblek|AUTHOR Dominik Roblek]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1617.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-3|PAPER Mon-3-3-3 — Lite Audio-Visual Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Lite Audio-Visual Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Shang-Yi Chuang|AUTHOR Shang-Yi Chuang]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Chen-Chou Lo|AUTHOR Chen-Chou Lo]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-4|PAPER Mon-3-3-4 — ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication]]</div>|<div class="cpsessionviewpapertitle">ORCA-CLEAN: A Deep Denoising Toolkit for Killer Whale Communication</div><div class="cpsessionviewpaperauthor">[[Christian Bergler|AUTHOR Christian Bergler]], [[Manuel Schmitt|AUTHOR Manuel Schmitt]], [[Andreas Maier|AUTHOR Andreas Maier]], [[Simeon Smeele|AUTHOR Simeon Smeele]], [[Volker Barth|AUTHOR Volker Barth]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-5|PAPER Mon-3-3-5 — A Deep Learning Approach to Active Noise Control]]</div>|<div class="cpsessionviewpapertitle">A Deep Learning Approach to Active Noise Control</div><div class="cpsessionviewpaperauthor">[[Hao Zhang|AUTHOR Hao Zhang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-6|PAPER Mon-3-3-6 — Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion]]</div>|<div class="cpsessionviewpapertitle">Improving Speech Intelligibility Through Speaker Dependent and Independent Spectral Style Conversion</div><div class="cpsessionviewpaperauthor">[[Tuan Dinh|AUTHOR Tuan Dinh]], [[Alexander Kain|AUTHOR Alexander Kain]], [[Kris Tjaden|AUTHOR Kris Tjaden]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1740.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-7|PAPER Mon-3-3-7 — End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speech Intelligibility Prediction Using Time-Domain Fully Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Mathias B. Pedersen|AUTHOR Mathias B. Pedersen]], [[Morten Kolbæk|AUTHOR Morten Kolbæk]], [[Asger H. Andersen|AUTHOR Asger H. Andersen]], [[Søren H. Jensen|AUTHOR Søren H. Jensen]], [[Jesper Jensen|AUTHOR Jesper Jensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1591.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-8|PAPER Mon-3-3-8 — Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System]]</div>|<div class="cpsessionviewpapertitle">Predicting Intelligibility of Enhanced Speech Using Posteriors Derived from DNN-Based ASR System</div><div class="cpsessionviewpaperauthor">[[Kenichi Arai|AUTHOR Kenichi Arai]], [[Shoko Araki|AUTHOR Shoko Araki]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Toshio Irino|AUTHOR Toshio Irino]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2121.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-9|PAPER Mon-3-3-9 — Automatic Estimation of Intelligibility Measure for Consonants in Speech]]</div>|<div class="cpsessionviewpapertitle">Automatic Estimation of Intelligibility Measure for Consonants in Speech</div><div class="cpsessionviewpaperauthor">[[Ali Abavisani|AUTHOR Ali Abavisani]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2883.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-3-10|PAPER Mon-3-3-10 — Large Scale Evaluation of Importance Maps in Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Large Scale Evaluation of Importance Maps in Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Viet Anh Trinh|AUTHOR Viet Anh Trinh]], [[Michael I. Mandel|AUTHOR Michael I. Mandel]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Tuomas Virtanen|
|^&nbsp;|^Pengyuan Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-1|PAPER Mon-3-4-1 — Neural Architecture Search on Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">Neural Architecture Search on Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Jixiang Li|AUTHOR Jixiang Li]], [[Chuming Liang|AUTHOR Chuming Liang]], [[Bo Zhang|AUTHOR Bo Zhang]], [[Zhao Wang|AUTHOR Zhao Wang]], [[Fei Xiang|AUTHOR Fei Xiang]], [[Xiangxiang Chu|AUTHOR Xiangxiang Chu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0992.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-2|PAPER Mon-3-4-2 — Acoustic Scene Classification Using Audio Tagging]]</div>|<div class="cpsessionviewpapertitle">Acoustic Scene Classification Using Audio Tagging</div><div class="cpsessionviewpaperauthor">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1151.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-3|PAPER Mon-3-4-3 — ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">ATReSN-Net: Capturing Attentive Temporal Relations in Semantic Neighborhood for Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Liwen Zhang|AUTHOR Liwen Zhang]], [[Jiqing Han|AUTHOR Jiqing Han]], [[Ziqiang Shi|AUTHOR Ziqiang Shi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1303.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-4|PAPER Mon-3-4-4 — Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network]]</div>|<div class="cpsessionviewpapertitle">Environment Sound Classification Using Multiple Feature Channels and Attention Based Deep Convolutional Neural Network</div><div class="cpsessionviewpaperauthor">[[Jivitesh Sharma|AUTHOR Jivitesh Sharma]], [[Ole-Christoffer Granmo|AUTHOR Ole-Christoffer Granmo]], [[Morten Goodwin|AUTHOR Morten Goodwin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-5|PAPER Mon-3-4-5 — Acoustic Scene Analysis with Multi-Head Attention Networks]]</div>|<div class="cpsessionviewpapertitle">Acoustic Scene Analysis with Multi-Head Attention Networks</div><div class="cpsessionviewpaperauthor">[[Weimin Wang|AUTHOR Weimin Wang]], [[Weiran Wang|AUTHOR Weiran Wang]], [[Ming Sun|AUTHOR Ming Sun]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-6|PAPER Mon-3-4-6 — Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">Relational Teacher Student Learning with Neural Label Embedding for Device Adaptation in Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Hu Hu|AUTHOR Hu Hu]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Yannan Wang|AUTHOR Yannan Wang]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-7|PAPER Mon-3-4-7 — An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances]]</div>|<div class="cpsessionviewpapertitle">An Acoustic Segment Model Based Segment Unit Selection Approach to Acoustic Scene Classification with Partial Utterances</div><div class="cpsessionviewpaperauthor">[[Hu Hu|AUTHOR Hu Hu]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Yannan Wang|AUTHOR Yannan Wang]], [[Xue Bai|AUTHOR Xue Bai]], [[Jun Du|AUTHOR Jun Du]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2476.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-8|PAPER Mon-3-4-8 — Attention-Driven Projections for Soundscape Classification]]</div>|<div class="cpsessionviewpapertitle">Attention-Driven Projections for Soundscape Classification</div><div class="cpsessionviewpaperauthor">[[Dhanunjaya Varma Devalraju|AUTHOR Dhanunjaya Varma Devalraju]], [[Muralikrishna H.|AUTHOR Muralikrishna H.]], [[Padmanabhan Rajan|AUTHOR Padmanabhan Rajan]], [[Dileep Aroor Dinesh|AUTHOR Dileep Aroor Dinesh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2655.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-9|PAPER Mon-3-4-9 — Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection]]</div>|<div class="cpsessionviewpapertitle">Computer Audition for Continuous Rainforest Occupancy Monitoring: The Case of Bornean Gibbons’ Call Detection</div><div class="cpsessionviewpaperauthor">[[Panagiotis Tzirakis|AUTHOR Panagiotis Tzirakis]], [[Alexander Shiarella|AUTHOR Alexander Shiarella]], [[Robert Ewers|AUTHOR Robert Ewers]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-4-10|PAPER Mon-3-4-10 — Deep Learning Based Open Set Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">Deep Learning Based Open Set Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Zuzanna Kwiatkowska|AUTHOR Zuzanna Kwiatkowska]], [[Beniamin Kalinowski|AUTHOR Beniamin Kalinowski]], [[Michał Kośmider|AUTHOR Michał Kośmider]], [[Krzysztof Rykaczewski|AUTHOR Krzysztof Rykaczewski]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Shengchen Li|
|^&nbsp;|^Yi Yu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1399.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-5-1|PAPER Mon-3-5-1 — Singing Synthesis: With a Little Help from my Attention]]</div>|<div class="cpsessionviewpapertitle">Singing Synthesis: With a Little Help from my Attention</div><div class="cpsessionviewpaperauthor">[[Orazio Angelini|AUTHOR Orazio Angelini]], [[Alexis Moinet|AUTHOR Alexis Moinet]], [[Kayoko Yanagisawa|AUTHOR Kayoko Yanagisawa]], [[Thomas Drugman|AUTHOR Thomas Drugman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-5-2|PAPER Mon-3-5-2 — Peking Opera Synthesis via Duration Informed Attention Network]]</div>|<div class="cpsessionviewpapertitle">Peking Opera Synthesis via Duration Informed Attention Network</div><div class="cpsessionviewpaperauthor">[[Yusong Wu|AUTHOR Yusong Wu]], [[Shengchen Li|AUTHOR Shengchen Li]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Heng Lu|AUTHOR Heng Lu]], [[Chao Weng|AUTHOR Chao Weng]], [[Liqiang Zhang|AUTHOR Liqiang Zhang]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1789.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-5-3|PAPER Mon-3-5-3 — DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System]]</div>|<div class="cpsessionviewpapertitle">DurIAN-SC: Duration Informed Attention Network Based Singing Voice Conversion System</div><div class="cpsessionviewpaperauthor">[[Liqiang Zhang|AUTHOR Liqiang Zhang]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Heng Lu|AUTHOR Heng Lu]], [[Chao Weng|AUTHOR Chao Weng]], [[Chunlei Zhang|AUTHOR Chunlei Zhang]], [[Yusong Wu|AUTHOR Yusong Wu]], [[Xiang Xie|AUTHOR Xiang Xie]], [[Zijin Li|AUTHOR Zijin Li]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1806.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-5-4|PAPER Mon-3-5-4 — Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music]]</div>|<div class="cpsessionviewpapertitle">Transfer Learning for Improving Singing-Voice Detection in Polyphonic Instrumental Music</div><div class="cpsessionviewpaperauthor">[[Yuanbo Hou|AUTHOR Yuanbo Hou]], [[Frank K. Soong|AUTHOR Frank K. Soong]], [[Jian Luan|AUTHOR Jian Luan]], [[Shengchen Li|AUTHOR Shengchen Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2555.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-5-5|PAPER Mon-3-5-5 — Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music]]</div>|<div class="cpsessionviewpapertitle">Channel-Wise Subband Input for Better Voice and Accompaniment Separation on High Resolution Music</div><div class="cpsessionviewpaperauthor">[[Haohe Liu|AUTHOR Haohe Liu]], [[Lei Xie|AUTHOR Lei Xie]], [[Jian Wu|AUTHOR Jian Wu]], [[Geng Yang|AUTHOR Geng Yang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Jiangyan Yi|
|^&nbsp;|^Hui Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-1|PAPER Mon-3-7-1 — Continual Learning in Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Continual Learning in Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1727.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-2|PAPER Mon-3-7-2 — Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism]]</div>|<div class="cpsessionviewpapertitle">Speaker Adaptive Training for Speech Recognition Based on Attention-Over-Attention Mechanism</div><div class="cpsessionviewpaperauthor">[[Genshun Wan|AUTHOR Genshun Wan]], [[Jia Pan|AUTHOR Jia Pan]], [[Qingran Wang|AUTHOR Qingran Wang]], [[Jianqing Gao|AUTHOR Jianqing Gao]], [[Zhongfu Ye|AUTHOR Zhongfu Ye]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-3|PAPER Mon-3-7-3 — Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator]]</div>|<div class="cpsessionviewpapertitle">Rapid RNN-T Adaptation Using Personalized Speech Synthesis and Neural Language Generator</div><div class="cpsessionviewpaperauthor">[[Yan Huang|AUTHOR Yan Huang]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Lei He|AUTHOR Lei He]], [[Wenning Wei|AUTHOR Wenning Wei]], [[William Gale|AUTHOR William Gale]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1281.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-4|PAPER Mon-3-7-4 — Speech Transformer with Speaker Aware Persistent Memory]]</div>|<div class="cpsessionviewpapertitle">Speech Transformer with Speaker Aware Persistent Memory</div><div class="cpsessionviewpaperauthor">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]], [[Shafiq Joty|AUTHOR Shafiq Joty]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1390.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-5|PAPER Mon-3-7-5 — Adaptive Speaker Normalization for CTC-Based Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Adaptive Speaker Normalization for CTC-Based Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Fenglin Ding|AUTHOR Fenglin Ding]], [[Wu Guo|AUTHOR Wu Guo]], [[Bin Gu|AUTHOR Bin Gu]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-6|PAPER Mon-3-7-6 — Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Domain Adaptation Under Label Space Mismatch for Speech Classification</div><div class="cpsessionviewpaperauthor">[[Akhil Mathur|AUTHOR Akhil Mathur]], [[Nadia Berthouze|AUTHOR Nadia Berthouze]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-7|PAPER Mon-3-7-7 — Learning Fast Adaptation on Cross-Accented Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Learning Fast Adaptation on Cross-Accented Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Genta Indra Winata|AUTHOR Genta Indra Winata]], [[Samuel Cahyawijaya|AUTHOR Samuel Cahyawijaya]], [[Zihan Liu|AUTHOR Zihan Liu]], [[Zhaojiang Lin|AUTHOR Zhaojiang Lin]], [[Andrea Madotto|AUTHOR Andrea Madotto]], [[Peng Xu|AUTHOR Peng Xu]], [[Pascale Fung|AUTHOR Pascale Fung]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-8|PAPER Mon-3-7-8 — Black-Box Adaptation of ASR for Accented Speech]]</div>|<div class="cpsessionviewpapertitle">Black-Box Adaptation of ASR for Accented Speech</div><div class="cpsessionviewpaperauthor">[[Kartik Khandelwal|AUTHOR Kartik Khandelwal]], [[Preethi Jyothi|AUTHOR Preethi Jyothi]], [[Abhijeet Awasthi|AUTHOR Abhijeet Awasthi]], [[Sunita Sarawagi|AUTHOR Sunita Sarawagi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-9|PAPER Mon-3-7-9 — Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation]]</div>|<div class="cpsessionviewpapertitle">Achieving Multi-Accent ASR via Unsupervised Acoustic Model Adaptation</div><div class="cpsessionviewpaperauthor">[[M.A. Tuğtekin Turan|AUTHOR M.A. Tuğtekin Turan]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Denis Jouvet|AUTHOR Denis Jouvet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-7-10|PAPER Mon-3-7-10 — Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering]]</div>|<div class="cpsessionviewpapertitle">Frame-Wise Online Unsupervised Adaptation of DNN-HMM Acoustic Model from Perspective of Robust Adaptive Filtering</div><div class="cpsessionviewpaperauthor">[[Ryu Takeda|AUTHOR Ryu Takeda]], [[Kazunori Komatani|AUTHOR Kazunori Komatani]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Esther Klabbers|
|^&nbsp;|^Hema Murthy|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-1|PAPER Mon-3-8-1 — Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer]]</div>|<div class="cpsessionviewpapertitle">Adversarially Trained Multi-Singer Sequence-to-Sequence Singing Synthesizer</div><div class="cpsessionviewpaperauthor">[[Jie Wu|AUTHOR Jie Wu]], [[Jian Luan|AUTHOR Jian Luan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1218.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-2|PAPER Mon-3-8-2 — Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Prediction of Head Motion from Speech Waveforms with a Canonical-Correlation-Constrained Autoencoder</div><div class="cpsessionviewpaperauthor">[[JinHong Lu|AUTHOR JinHong Lu]], [[Hiroshi Shimodaira|AUTHOR Hiroshi Shimodaira]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1410.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-3|PAPER Mon-3-8-3 — XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System]]</div>|<div class="cpsessionviewpapertitle">XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System</div><div class="cpsessionviewpaperauthor">[[Peiling Lu|AUTHOR Peiling Lu]], [[Jie Wu|AUTHOR Jie Wu]], [[Jian Luan|AUTHOR Jian Luan]], [[Xu Tan|AUTHOR Xu Tan]], [[Li Zhou|AUTHOR Li Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-4|PAPER Mon-3-8-4 — Stochastic Talking Face Generation Using Latent Distribution Matching]]</div>|<div class="cpsessionviewpapertitle">Stochastic Talking Face Generation Using Latent Distribution Matching</div><div class="cpsessionviewpaperauthor">[[Ravindra Yadav|AUTHOR Ravindra Yadav]], [[Ashish Sardana|AUTHOR Ashish Sardana]], [[Vinay P. Namboodiri|AUTHOR Vinay P. Namboodiri]], [[Rajesh M. Hegde|AUTHOR Rajesh M. Hegde]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1984.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-5|PAPER Mon-3-8-5 — Speech-to-Singing Conversion Based on Boundary Equilibrium GAN]]</div>|<div class="cpsessionviewpapertitle">Speech-to-Singing Conversion Based on Boundary Equilibrium GAN</div><div class="cpsessionviewpaperauthor">[[Da-Yi Wu|AUTHOR Da-Yi Wu]], [[Yi-Hsuan Yang|AUTHOR Yi-Hsuan Yang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2136.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-6|PAPER Mon-3-8-6 — Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image]]</div>|<div class="cpsessionviewpapertitle">Face2Speech: Towards Multi-Speaker Text-to-Speech Synthesis Using an Embedding Vector Predicted from a Face Image</div><div class="cpsessionviewpaperauthor">[[Shunsuke Goto|AUTHOR Shunsuke Goto]], [[Kotaro Onishi|AUTHOR Kotaro Onishi]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Kentaro Tachibana|AUTHOR Kentaro Tachibana]], [[Koichiro Mori|AUTHOR Koichiro Mori]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2304.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-8-7|PAPER Mon-3-8-7 — Speech Driven Talking Head Generation via Attentional Landmarks Based Representation]]</div>|<div class="cpsessionviewpapertitle">Speech Driven Talking Head Generation via Attentional Landmarks Based Representation</div><div class="cpsessionviewpaperauthor">[[Wentao Wang|AUTHOR Wentao Wang]], [[Yan Wang|AUTHOR Yan Wang]], [[Jianqing Sun|AUTHOR Jianqing Sun]], [[Qingsong Liu|AUTHOR Qingsong Liu]], [[Jiaen Liang|AUTHOR Jiaen Liang]], [[Teng Li|AUTHOR Teng Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Monday 26 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Cassia Valentini-Botinhao|
|^&nbsp;|^Jan Rennies|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-1|PAPER Mon-3-9-1 — Optimization and Evaluation of an Intelligibility-Improving Signal Processing Approach (IISPA) for the Hurricane Challenge 2.0 with FADE]]</div>|<div class="cpsessionviewpapertitle">Optimization and Evaluation of an Intelligibility-Improving Signal Processing Approach (IISPA) for the Hurricane Challenge 2.0 with FADE</div><div class="cpsessionviewpaperauthor">[[Marc René Schädler|AUTHOR Marc René Schädler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-2|PAPER Mon-3-9-2 — iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning]]</div>|<div class="cpsessionviewpapertitle">iMetricGAN: Intelligibility Enhancement for Speech-in-Noise Using Generative Adversarial Network-Based Metric Learning</div><div class="cpsessionviewpaperauthor">[[Haoyu Li|AUTHOR Haoyu Li]], [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1641.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-3|PAPER Mon-3-9-3 — Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0]]</div>|<div class="cpsessionviewpapertitle">Intelligibility-Enhancing Speech Modifications — The Hurricane Challenge 2.0</div><div class="cpsessionviewpaperauthor">[[Jan Rennies|AUTHOR Jan Rennies]], [[Henning Schepker|AUTHOR Henning Schepker]], [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]], [[Martin Cooke|AUTHOR Martin Cooke]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-4|PAPER Mon-3-9-4 — Exploring Listeners’ Speech Rate Preferences]]</div>|<div class="cpsessionviewpapertitle">Exploring Listeners’ Speech Rate Preferences</div><div class="cpsessionviewpaperauthor">[[Olympia Simantiraki|AUTHOR Olympia Simantiraki]], [[Martin Cooke|AUTHOR Martin Cooke]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-5|PAPER Mon-3-9-5 — Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation]]</div>|<div class="cpsessionviewpapertitle">Adaptive Compressive Onset-Enhancement for Improved Speech Intelligibility in Noise and Reverberation</div><div class="cpsessionviewpaperauthor">[[Felicitas Bederna|AUTHOR Felicitas Bederna]], [[Henning Schepker|AUTHOR Henning Schepker]], [[Christian Rollwage|AUTHOR Christian Rollwage]], [[Simon Doclo|AUTHOR Simon Doclo]], [[Arne Pusch|AUTHOR Arne Pusch]], [[Jörg Bitzer|AUTHOR Jörg Bitzer]], [[Jan Rennies|AUTHOR Jan Rennies]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2748.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-6|PAPER Mon-3-9-6 — A Sound Engineering Approach to Near End Listening Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Sound Engineering Approach to Near End Listening Enhancement</div><div class="cpsessionviewpaperauthor">[[Carol Chermaz|AUTHOR Carol Chermaz]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2793.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-3-9-7|PAPER Mon-3-9-7 — Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion]]</div>|<div class="cpsessionviewpapertitle">Enhancing Speech Intelligibility in Text-To-Speech Synthesis Using Speaking Style Conversion</div><div class="cpsessionviewpaperauthor">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]], [[Muhammed P.V. Shifas|AUTHOR Muhammed P.V. Shifas]], [[Yannis Pantazis|AUTHOR Yannis Pantazis]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Monday 26 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Kate Knill|
|^&nbsp;|^Daniele Falavigna|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">Overview of the Interspeech TLT2020 Shared Task on ASR for Non-Native Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Roberto Gretter|AUTHOR Roberto Gretter]], [[Marco Matassoni|AUTHOR Marco Matassoni]], [[Daniele Falavigna|AUTHOR Daniele Falavigna]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Chee Wee Leong|AUTHOR Chee Wee Leong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge]]</div>|<div class="cpsessionviewpapertitle">The NTNU System at the Interspeech 2020 Non-Native Children’s Speech ASR Challenge</div><div class="cpsessionviewpaperauthor">[[Tien-Hong Lo|AUTHOR Tien-Hong Lo]], [[Fu-An Chao|AUTHOR Fu-An Chao]], [[Shi-Yan Weng|AUTHOR Shi-Yan Weng]], [[Berlin Chen|AUTHOR Berlin Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2154.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems]]</div>|<div class="cpsessionviewpapertitle">Non-Native Children’s Automatic Speech Recognition: The INTERSPEECH 2020 Shared Task ALTA Systems</div><div class="cpsessionviewpaperauthor">[[Kate M. Knill|AUTHOR Kate M. Knill]], [[Linlin Wang|AUTHOR Linlin Wang]], [[Yu Wang|AUTHOR Yu Wang]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">Data Augmentation Using Prosody and False Starts to Recognize Non-Native Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Hemant Kathania|AUTHOR Hemant Kathania]], [[Mittul Singh|AUTHOR Mittul Singh]], [[Tamás Grósz|AUTHOR Tamás Grósz]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">UNSW System Description for the Shared Task on Automatic Speech Recognition for Non-Native Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Mostafa Shahin|AUTHOR Mostafa Shahin]], [[Renée Lu|AUTHOR Renée Lu]], [[Julien Epps|AUTHOR Julien Epps]], [[Beena Ahmed|AUTHOR Beena Ahmed]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Monday 26 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Hossein Zeinali|
|^&nbsp;|^Kong Aik Lee|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1188.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — Improving X-Vector and PLDA for Text-Dependent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Improving X-Vector and PLDA for Text-Dependent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Zhuxin Chen|AUTHOR Zhuxin Chen]], [[Yue Lin|AUTHOR Yue Lin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">SdSV Challenge 2020: Large-Scale Evaluation of Short-Duration Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Jahangir Alam|AUTHOR Jahangir Alam]], [[Lukáš Burget|AUTHOR Lukáš Burget]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1704.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">The XMUSPEECH System for Short-Duration Speaker Verification Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Tao Jiang|AUTHOR Tao Jiang]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2183.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">Robust Text-Dependent Speaker Verification via Character-Level Information Preservation for the SdSV Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Sung Hwan Mun|AUTHOR Sung Hwan Mun]], [[Woo Hyun Kang|AUTHOR Woo Hyun Kang]], [[Min Hyun Han|AUTHOR Min Hyun Han]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">The TalTech Systems for the Short-Duration Speaker Verification Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Tanel Alumäe|AUTHOR Tanel Alumäe]], [[Jörgen Valk|AUTHOR Jörgen Valk]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">Investigation of NICT Submission for Short-Duration Speaker Verification Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Peng Shen|AUTHOR Peng Shen]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization]]</div>|<div class="cpsessionviewpapertitle">Cross-Lingual Speaker Verification with Domain-Balanced Hard Prototype Mining and Language-Dependent Score Normalization</div><div class="cpsessionviewpaperauthor">[[Jenthe Thienpondt|AUTHOR Jenthe Thienpondt]], [[Brecht Desplanques|AUTHOR Brecht Desplanques]], [[Kris Demuynck|AUTHOR Kris Demuynck]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">BUT Text-Dependent Speaker Verification System for SdSV Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Alicia Lozano-Diez|AUTHOR Alicia Lozano-Diez]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Bhargav Pulugundla|AUTHOR Bhargav Pulugundla]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]], [[Ondvrej Novotný|AUTHOR Ondvrej Novotný]], [[Pavel Matějka|AUTHOR Pavel Matějka]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2957.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-9|PAPER Mon-SS-2-6-9 — Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Exploring the Use of an Unsupervised Autoregressive Model as a Shared Encoder for Text-Dependent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Vijay Ravi|AUTHOR Vijay Ravi]], [[Ruchao Fan|AUTHOR Ruchao Fan]], [[Amber Afshan|AUTHOR Amber Afshan]], [[Huanhua Lu|AUTHOR Huanhua Lu]], [[Abeer Alwan|AUTHOR Abeer Alwan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Helen Meng|
|^&nbsp;|^Tomoki Toda|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-1|PAPER Thu-1-1-1 — Vocoder-Based Speech Synthesis from Silent Videos]]</div>|<div class="cpsessionviewpapertitle">Vocoder-Based Speech Synthesis from Silent Videos</div><div class="cpsessionviewpaperauthor">[[Daniel Michelsanti|AUTHOR Daniel Michelsanti]], [[Olga Slizovskaia|AUTHOR Olga Slizovskaia]], [[Gloria Haro|AUTHOR Gloria Haro]], [[Emilia Gómez|AUTHOR Emilia Gómez]], [[Zheng-Hua Tan|AUTHOR Zheng-Hua Tan]], [[Jesper Jensen|AUTHOR Jesper Jensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-2|PAPER Thu-1-1-2 — Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|<div class="cpsessionviewpapertitle">Quasi-Periodic Parallel WaveGAN Vocoder: A Non-Autoregressive Pitch-Dependent Dilated Convolution Model for Parametric Speech Generation</div><div class="cpsessionviewpaperauthor">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Takuma Okamoto|AUTHOR Takuma Okamoto]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1072.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-3|PAPER Thu-1-1-3 — A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems]]</div>|<div class="cpsessionviewpapertitle">A Cyclical Post-Filtering Approach to Mismatch Refinement of Neural Vocoder for Text-to-Speech Systems</div><div class="cpsessionviewpaperauthor">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Kazuki Yasuhara|AUTHOR Kazuki Yasuhara]], [[Noriyuki Matsunaga|AUTHOR Noriyuki Matsunaga]], [[Yamato Ohtani|AUTHOR Yamato Ohtani]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-4|PAPER Thu-1-1-4 — Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder]]</div>|<div class="cpsessionviewpapertitle">Audio Dequantization for High Fidelity Audio Generation in Flow-Based Neural Vocoder</div><div class="cpsessionviewpaperauthor">[[Hyun-Wook Yoon|AUTHOR Hyun-Wook Yoon]], [[Sang-Hoon Lee|AUTHOR Sang-Hoon Lee]], [[Hyeong-Rae Noh|AUTHOR Hyeong-Rae Noh]], [[Seong-Whan Lee|AUTHOR Seong-Whan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1437.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-5|PAPER Thu-1-1-5 — StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes]]</div>|<div class="cpsessionviewpapertitle">StrawNet: Self-Training WaveNet for TTS in Low-Data Regimes</div><div class="cpsessionviewpaperauthor">[[Manish Sharma|AUTHOR Manish Sharma]], [[Tom Kenter|AUTHOR Tom Kenter]], [[Rob Clark|AUTHOR Rob Clark]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1463.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-6|PAPER Thu-1-1-6 — An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis]]</div>|<div class="cpsessionviewpapertitle">An Efficient Subband Linear Prediction for LPCNet-Based Neural Synthesis</div><div class="cpsessionviewpaperauthor">[[Yang Cui|AUTHOR Yang Cui]], [[Xi Wang|AUTHOR Xi Wang]], [[Lei He|AUTHOR Lei He]], [[Frank K. Soong|AUTHOR Frank K. Soong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-7|PAPER Thu-1-1-7 — Reverberation Modeling for Source-Filter-Based Neural Vocoder]]</div>|<div class="cpsessionviewpapertitle">Reverberation Modeling for Source-Filter-Based Neural Vocoder</div><div class="cpsessionviewpaperauthor">[[Yang Ai|AUTHOR Yang Ai]], [[Xin Wang|AUTHOR Xin Wang]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2041.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-8|PAPER Thu-1-1-8 — Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems]]</div>|<div class="cpsessionviewpapertitle">Bunched LPCNet: Vocoder for Low-Cost Neural Text-To-Speech Systems</div><div class="cpsessionviewpaperauthor">[[Ravichander Vipperla|AUTHOR Ravichander Vipperla]], [[Sangjun Park|AUTHOR Sangjun Park]], [[Kihyun Choo|AUTHOR Kihyun Choo]], [[Samin Ishtiaq|AUTHOR Samin Ishtiaq]], [[Kyoungbo Min|AUTHOR Kyoungbo Min]], [[Sourav Bhattacharya|AUTHOR Sourav Bhattacharya]], [[Abhinav Mehrotra|AUTHOR Abhinav Mehrotra]], [[Alberto Gil C.P. Ramos|AUTHOR Alberto Gil C.P. Ramos]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2116.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-9|PAPER Thu-1-1-9 — Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder]]</div>|<div class="cpsessionviewpapertitle">Neural Text-to-Speech with a Modeling-by-Generation Excitation Vocoder</div><div class="cpsessionviewpaperauthor">[[Eunwoo Song|AUTHOR Eunwoo Song]], [[Min-Jae Hwang|AUTHOR Min-Jae Hwang]], [[Ryuichi Yamamoto|AUTHOR Ryuichi Yamamoto]], [[Jin-Seob Kim|AUTHOR Jin-Seob Kim]], [[Ohsung Kwon|AUTHOR Ohsung Kwon]], [[Jae-Min Kim|AUTHOR Jae-Min Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2867.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-1-10|PAPER Thu-1-1-10 — SpeedySpeech: Efficient Neural Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">SpeedySpeech: Efficient Neural Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jan Vainer|AUTHOR Jan Vainer]], [[Ondřej Dušek|AUTHOR Ondřej Dušek]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Jan Skoglund|
|^&nbsp;|^Xueliang Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1307.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-1|PAPER Thu-1-10-1 — A Semi-Blind Source Separation Approach for Speech Dereverberation]]</div>|<div class="cpsessionviewpapertitle">A Semi-Blind Source Separation Approach for Speech Dereverberation</div><div class="cpsessionviewpaperauthor">[[Ziteng Wang|AUTHOR Ziteng Wang]], [[Yueyue Na|AUTHOR Yueyue Na]], [[Zhang Liu|AUTHOR Zhang Liu]], [[Yun Li|AUTHOR Yun Li]], [[Biao Tian|AUTHOR Biao Tian]], [[Qiang Fu|AUTHOR Qiang Fu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-2|PAPER Thu-1-10-2 — Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation]]</div>|<div class="cpsessionviewpapertitle">Virtual Acoustic Channel Expansion Based on Neural Networks for Weighted Prediction Error-Based Speech Dereverberation</div><div class="cpsessionviewpaperauthor">[[Joon-Young Yang|AUTHOR Joon-Young Yang]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2048.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-3|PAPER Thu-1-10-3 — SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping]]</div>|<div class="cpsessionviewpapertitle">SkipConvNet: Skip Convolutional Neural Network for Speech Dereverberation Using Optimally Smoothed Spectral Mapping</div><div class="cpsessionviewpaperauthor">[[Vinay Kothapally|AUTHOR Vinay Kothapally]], [[Wei Xia|AUTHOR Wei Xia]], [[Shahram Ghorbani|AUTHOR Shahram Ghorbani]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Wei Xue|AUTHOR Wei Xue]], [[Jing Huang|AUTHOR Jing Huang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1260.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-4|PAPER Thu-1-10-4 — A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning]]</div>|<div class="cpsessionviewpapertitle">A Robust and Cascaded Acoustic Echo Cancellation Based on Deep Learning</div><div class="cpsessionviewpaperauthor">[[Chenggang Zhang|AUTHOR Chenggang Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1454.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-5|PAPER Thu-1-10-5 — Generative Adversarial Network Based Acoustic Echo Cancellation]]</div>|<div class="cpsessionviewpapertitle">Generative Adversarial Network Based Acoustic Echo Cancellation</div><div class="cpsessionviewpaperauthor">[[Yi Zhang|AUTHOR Yi Zhang]], [[Chengyun Deng|AUTHOR Chengyun Deng]], [[Shiqian Ma|AUTHOR Shiqian Ma]], [[Yongtao Sha|AUTHOR Yongtao Sha]], [[Hui Song|AUTHOR Hui Song]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1473.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-6|PAPER Thu-1-10-6 — Nonlinear Residual Echo Suppression Using a Recurrent Neural Network]]</div>|<div class="cpsessionviewpapertitle">Nonlinear Residual Echo Suppression Using a Recurrent Neural Network</div><div class="cpsessionviewpaperauthor">[[Lukas Pfeifenberger|AUTHOR Lukas Pfeifenberger]], [[Franz Pernkopf|AUTHOR Franz Pernkopf]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2131.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-7|PAPER Thu-1-10-7 — Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation]]</div>|<div class="cpsessionviewpapertitle">Independent Echo Path Modeling for Stereophonic Acoustic Echo Cancellation</div><div class="cpsessionviewpaperauthor">[[Yi Gao|AUTHOR Yi Gao]], [[Ian Liu|AUTHOR Ian Liu]], [[J. Zheng|AUTHOR J. Zheng]], [[Cheng Luo|AUTHOR Cheng Luo]], [[Bin Li|AUTHOR Bin Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2234.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-8|PAPER Thu-1-10-8 — Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet]]</div>|<div class="cpsessionviewpapertitle">Nonlinear Residual Echo Suppression Based on Multi-Stream Conv-TasNet</div><div class="cpsessionviewpaperauthor">[[Hongsheng Chen|AUTHOR Hongsheng Chen]], [[Teng Xiang|AUTHOR Teng Xiang]], [[Kai Chen|AUTHOR Kai Chen]], [[Jing Lu|AUTHOR Jing Lu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2479.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-9|PAPER Thu-1-10-9 — Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios]]</div>|<div class="cpsessionviewpapertitle">Improving Partition-Block-Based Acoustic Echo Canceler in Under-Modeling Scenarios</div><div class="cpsessionviewpaperauthor">[[Wenzhi Fan|AUTHOR Wenzhi Fan]], [[Jing Lu|AUTHOR Jing Lu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3200.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-10-10|PAPER Thu-1-10-10 — Attention Wave-U-Net for Acoustic Echo Cancellation]]</div>|<div class="cpsessionviewpapertitle">Attention Wave-U-Net for Acoustic Echo Cancellation</div><div class="cpsessionviewpaperauthor">[[Jung-Hee Kim|AUTHOR Jung-Hee Kim]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Frank Soong|
|^&nbsp;|^Zhengqi Wen|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-1|PAPER Thu-1-11-1 — From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint]]</div>|<div class="cpsessionviewpapertitle">From Speaker Verification to Multispeaker Speech Synthesis, Deep Transfer with Feedback Constraint</div><div class="cpsessionviewpaperauthor">[[Zexin Cai|AUTHOR Zexin Cai]], [[Chuxiong Zhang|AUTHOR Chuxiong Zhang]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-2|PAPER Thu-1-11-2 — Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?]]</div>|<div class="cpsessionviewpapertitle">Can Speaker Augmentation Improve Multi-Speaker End-to-End TTS?</div><div class="cpsessionviewpaperauthor">[[Erica Cooper|AUTHOR Erica Cooper]], [[Cheng-I Lai|AUTHOR Cheng-I Lai]], [[Yusuke Yasuda|AUTHOR Yusuke Yasuda]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1662.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-3|PAPER Thu-1-11-3 — Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding]]</div>|<div class="cpsessionviewpapertitle">Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding</div><div class="cpsessionviewpaperauthor">[[Tao Wang|AUTHOR Tao Wang]], [[Xuefei Liu|AUTHOR Xuefei Liu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1737.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-4|PAPER Thu-1-11-4 — Bi-Level Speaker Supervision for One-Shot Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Bi-Level Speaker Supervision for One-Shot Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Tao Wang|AUTHOR Tao Wang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1788.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-5|PAPER Thu-1-11-5 — Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding]]</div>|<div class="cpsessionviewpapertitle">Naturalness Enhancement with Linguistic Information in End-to-End TTS Using Unsupervised Parallel Encoding</div><div class="cpsessionviewpaperauthor">[[Alex Peiró-Lilja|AUTHOR Alex Peiró-Lilja]], [[Mireia Farrús|AUTHOR Mireia Farrús]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1976.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-6|PAPER Thu-1-11-6 — MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search]]</div>|<div class="cpsessionviewpapertitle">MoBoAligner: A Neural Alignment Model for Non-Autoregressive TTS with Monotonic Boundary Search</div><div class="cpsessionviewpaperauthor">[[Naihan Li|AUTHOR Naihan Li]], [[Shujie Liu|AUTHOR Shujie Liu]], [[Yanqing Liu|AUTHOR Yanqing Liu]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Ming Liu|AUTHOR Ming Liu]], [[Ming Zhou|AUTHOR Ming Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2123.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-7|PAPER Thu-1-11-7 — JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment]]</div>|<div class="cpsessionviewpapertitle">JDI-T: Jointly Trained Duration Informed Transformer for Text-To-Speech without Explicit Alignment</div><div class="cpsessionviewpaperauthor">[[Dan Lim|AUTHOR Dan Lim]], [[Won Jang|AUTHOR Won Jang]], [[Gyeonghwan O|AUTHOR Gyeonghwan O]], [[Heayoung Park|AUTHOR Heayoung Park]], [[Bongwan Kim|AUTHOR Bongwan Kim]], [[Jaesam Yoon|AUTHOR Jaesam Yoon]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2347.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-8|PAPER Thu-1-11-8 — End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention]]</div>|<div class="cpsessionviewpapertitle">End-to-End Text-to-Speech Synthesis with Unaligned Multiple Language Units Based on Attention</div><div class="cpsessionviewpaperauthor">[[Masashi Aso|AUTHOR Masashi Aso]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-9|PAPER Thu-1-11-9 — Attention Forcing for Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Attention Forcing for Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Qingyun Dou|AUTHOR Qingyun Dou]], [[Joshua Efiong|AUTHOR Joshua Efiong]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-10|PAPER Thu-1-11-10 — Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Testing the Limits of Representation Mixing for Pronunciation Correction in End-to-End Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jason Fong|AUTHOR Jason Fong]], [[Jason Taylor|AUTHOR Jason Taylor]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-11-11|PAPER Thu-1-11-11 — MultiSpeech: Multi-Speaker Text to Speech with Transformer]]</div>|<div class="cpsessionviewpapertitle">MultiSpeech: Multi-Speaker Text to Speech with Transformer</div><div class="cpsessionviewpaperauthor">[[Mingjian Chen|AUTHOR Mingjian Chen]], [[Xu Tan|AUTHOR Xu Tan]], [[Yi Ren|AUTHOR Yi Ren]], [[Jin Xu|AUTHOR Jin Xu]], [[Hao Sun|AUTHOR Hao Sun]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Tao Qin|AUTHOR Tao Qin]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Yanzhang He|
|^&nbsp;|^Steve Renals|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-1|PAPER Thu-1-2-1 — Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised End-to-End ASR via Teacher-Student Learning with Conditional Posterior Distribution</div><div class="cpsessionviewpaperauthor">[[Zi-qiang Zhang|AUTHOR Zi-qiang Zhang]], [[Yan Song|AUTHOR Yan Song]], [[Jian-shu Zhang|AUTHOR Jian-shu Zhang]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-2|PAPER Thu-1-2-2 — Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models]]</div>|<div class="cpsessionviewpapertitle">Leveraging Unlabeled Speech for Sequence Discriminative Training of Acoustic Models</div><div class="cpsessionviewpaperauthor">[[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Sri Garimella|AUTHOR Sri Garimella]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-3|PAPER Thu-1-2-3 — Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability]]</div>|<div class="cpsessionviewpapertitle">Developing RNN-T Models Surpassing High-Performance Hybrid Models with Customization Capability</div><div class="cpsessionviewpaperauthor">[[Jinyu Li|AUTHOR Jinyu Li]], [[Rui Zhao|AUTHOR Rui Zhao]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Yanqing Liu|AUTHOR Yanqing Liu]], [[Wenning Wei|AUTHOR Wenning Wei]], [[Sarangarajan Parthasarathy|AUTHOR Sarangarajan Parthasarathy]], [[Vadim Mazalov|AUTHOR Vadim Mazalov]], [[Zhenghao Wang|AUTHOR Zhenghao Wang]], [[Lei He|AUTHOR Lei He]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2816.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-4|PAPER Thu-1-2-4 — End-to-End ASR with Adaptive Span Self-Attention]]</div>|<div class="cpsessionviewpapertitle">End-to-End ASR with Adaptive Span Self-Attention</div><div class="cpsessionviewpaperauthor">[[Xuankai Chang|AUTHOR Xuankai Chang]], [[Aswin Shanmugam Subramanian|AUTHOR Aswin Shanmugam Subramanian]], [[Pengcheng Guo|AUTHOR Pengcheng Guo]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Yuya Fujita|AUTHOR Yuya Fujita]], [[Motoi Omachi|AUTHOR Motoi Omachi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1569.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-5|PAPER Thu-1-2-5 — Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Subword Regularization: An Analysis of Scalability and Generalization for End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Egor Lakomkin|AUTHOR Egor Lakomkin]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Ilya Sklyar|AUTHOR Ilya Sklyar]], [[Simon Wiesler|AUTHOR Simon Wiesler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2675.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-6|PAPER Thu-1-2-6 — Early Stage LM Integration Using Local and Global Log-Linear Combination]]</div>|<div class="cpsessionviewpapertitle">Early Stage LM Integration Using Local and Global Log-Linear Combination</div><div class="cpsessionviewpaperauthor">[[Wilfried Michel|AUTHOR Wilfried Michel]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-7|PAPER Thu-1-2-7 — ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context]]</div>|<div class="cpsessionviewpapertitle">ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context</div><div class="cpsessionviewpaperauthor">[[Wei Han|AUTHOR Wei Han]], [[Zhengdong Zhang|AUTHOR Zhengdong Zhang]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Jiahui Yu|AUTHOR Jiahui Yu]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[James Qin|AUTHOR James Qin]], [[Anmol Gulati|AUTHOR Anmol Gulati]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Yonghui Wu|AUTHOR Yonghui Wu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-8|PAPER Thu-1-2-8 — Emitting Word Timings with End-to-End Models]]</div>|<div class="cpsessionviewpapertitle">Emitting Word Timings with End-to-End Models</div><div class="cpsessionviewpaperauthor">[[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[David Rybach|AUTHOR David Rybach]], [[Basi García|AUTHOR Basi García]], [[Trevor Strohman|AUTHOR Trevor Strohman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2897.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-2-9|PAPER Thu-1-2-9 — Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection]]</div>|<div class="cpsessionviewpapertitle">Low-Latency Sequence-to-Sequence Speech Recognition and Translation by Partial Hypothesis Selection</div><div class="cpsessionviewpaperauthor">[[Danni Liu|AUTHOR Danni Liu]], [[Gerasimos Spanakis|AUTHOR Gerasimos Spanakis]], [[Jan Niehues|AUTHOR Jan Niehues]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Penny Karanasou|
|^&nbsp;|^Jia Cui|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-1|PAPER Thu-1-3-1 — Neural Language Modeling with Implicit Cache Pointers]]</div>|<div class="cpsessionviewpapertitle">Neural Language Modeling with Implicit Cache Pointers</div><div class="cpsessionviewpaperauthor">[[Ke Li|AUTHOR Ke Li]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1784.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-2|PAPER Thu-1-3-2 — Finnish ASR with Deep Transformer Models]]</div>|<div class="cpsessionviewpapertitle">Finnish ASR with Deep Transformer Models</div><div class="cpsessionviewpaperauthor">[[Abhilash Jain|AUTHOR Abhilash Jain]], [[Aku Rouhe|AUTHOR Aku Rouhe]], [[Stig-Arne Grönroos|AUTHOR Stig-Arne Grönroos]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1179.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-3|PAPER Thu-1-3-3 — Distilling the Knowledge of BERT for Sequence-to-Sequence ASR]]</div>|<div class="cpsessionviewpapertitle">Distilling the Knowledge of BERT for Sequence-to-Sequence ASR</div><div class="cpsessionviewpaperauthor">[[Hayato Futami|AUTHOR Hayato Futami]], [[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Shinsuke Sakai|AUTHOR Shinsuke Sakai]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-4|PAPER Thu-1-3-4 — Stochastic Convolutional Recurrent Networks for Language Modeling]]</div>|<div class="cpsessionviewpapertitle">Stochastic Convolutional Recurrent Networks for Language Modeling</div><div class="cpsessionviewpaperauthor">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Yu-Min Huang|AUTHOR Yu-Min Huang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1849.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-5|PAPER Thu-1-3-5 — Investigation of Large-Margin Softmax in Neural Language Modeling]]</div>|<div class="cpsessionviewpapertitle">Investigation of Large-Margin Softmax in Neural Language Modeling</div><div class="cpsessionviewpaperauthor">[[Jingjing Huo|AUTHOR Jingjing Huo]], [[Yingbo Gao|AUTHOR Yingbo Gao]], [[Weiyue Wang|AUTHOR Weiyue Wang]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1344.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-6|PAPER Thu-1-3-6 — Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model]]</div>|<div class="cpsessionviewpapertitle">Contextualizing ASR Lattice Rescoring with Hybrid Pointer Network Language Model</div><div class="cpsessionviewpaperauthor">[[Da-Rong Liu|AUTHOR Da-Rong Liu]], [[Chunxi Liu|AUTHOR Chunxi Liu]], [[Frank Zhang|AUTHOR Frank Zhang]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2404.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-7|PAPER Thu-1-3-7 — Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict]]</div>|<div class="cpsessionviewpapertitle">Mask CTC: Non-Autoregressive End-to-End ASR with CTC and Mask Predict</div><div class="cpsessionviewpaperauthor">[[Yosuke Higuchi|AUTHOR Yosuke Higuchi]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1619.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-3-8|PAPER Thu-1-3-8 — Insertion-Based Modeling for End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Insertion-Based Modeling for End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Yuya Fujita|AUTHOR Yuya Fujita]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Motoi Omachi|AUTHOR Motoi Omachi]], [[Xuankai Chang|AUTHOR Xuankai Chang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Torbjørn Svendsen|
|^&nbsp;|^Preeti Rao|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0995.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-1|PAPER Thu-1-4-1 — Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">Voice Activity Detection in the Wild via Weakly Supervised Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Yefei Chen|AUTHOR Yefei Chen]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Mengyue Wu|AUTHOR Mengyue Wu]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-2|PAPER Thu-1-4-2 — Dual Attention in Time and Frequency Domain for Voice Activity Detection]]</div>|<div class="cpsessionviewpapertitle">Dual Attention in Time and Frequency Domain for Voice Activity Detection</div><div class="cpsessionviewpaperauthor">[[Joohyung Lee|AUTHOR Joohyung Lee]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-3|PAPER Thu-1-4-3 — Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection]]</div>|<div class="cpsessionviewpapertitle">Polishing the Classical Likelihood Ratio Test by Supervised Learning for Voice Activity Detection</div><div class="cpsessionviewpaperauthor">[[Tianjiao Xu|AUTHOR Tianjiao Xu]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-4|PAPER Thu-1-4-4 — A Noise Robust Technique for Detecting Vowels in Speech Signals]]</div>|<div class="cpsessionviewpapertitle">A Noise Robust Technique for Detecting Vowels in Speech Signals</div><div class="cpsessionviewpaperauthor">[[Avinash Kumar|AUTHOR Avinash Kumar]], [[S. Shahnawazuddin|AUTHOR S. Shahnawazuddin]], [[Waquar Ahmad|AUTHOR Waquar Ahmad]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2285.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-5|PAPER Thu-1-4-5 — End-to-End Domain-Adversarial Voice Activity Detection]]</div>|<div class="cpsessionviewpapertitle">End-to-End Domain-Adversarial Voice Activity Detection</div><div class="cpsessionviewpaperauthor">[[Marvin Lavechin|AUTHOR Marvin Lavechin]], [[Marie-Philippe Gill|AUTHOR Marie-Philippe Gill]], [[Ruben Bousbib|AUTHOR Ruben Bousbib]], [[Hervé Bredin|AUTHOR Hervé Bredin]], [[Leibny Paola Garcia-Perera|AUTHOR Leibny Paola Garcia-Perera]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2326.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-6|PAPER Thu-1-4-6 — VOP Detection in Variable Speech Rate Condition]]</div>|<div class="cpsessionviewpapertitle">VOP Detection in Variable Speech Rate Condition</div><div class="cpsessionviewpaperauthor">[[Ayush Agarwal|AUTHOR Ayush Agarwal]], [[Jagabandhu Mishra|AUTHOR Jagabandhu Mishra]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-7|PAPER Thu-1-4-7 — MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection]]</div>|<div class="cpsessionviewpapertitle">MLNET: An Adaptive Multiple Receptive-Field Attention Neural Network for Voice Activity Detection</div><div class="cpsessionviewpaperauthor">[[Zhenpeng Zheng|AUTHOR Zhenpeng Zheng]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jian Luo|AUTHOR Jian Luo]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2398.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-8|PAPER Thu-1-4-8 — Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Contrastive Learning for Unsupervised Phoneme Segmentation</div><div class="cpsessionviewpaperauthor">[[Felix Kreuk|AUTHOR Felix Kreuk]], [[Joseph Keshet|AUTHOR Joseph Keshet]], [[Yossi Adi|AUTHOR Yossi Adi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-9|PAPER Thu-1-4-9 — That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages]]</div>|<div class="cpsessionviewpapertitle">That Sounds Familiar: An Analysis of Phonetic Representations Transfer Across Languages</div><div class="cpsessionviewpaperauthor">[[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Laureano Moro-Velázquez|AUTHOR Laureano Moro-Velázquez]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2804.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-4-10|PAPER Thu-1-4-10 — Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development]]</div>|<div class="cpsessionviewpapertitle">Analyzing Read Aloud Speech by Primary School Pupils: Insights for Research and Development</div><div class="cpsessionviewpaperauthor">[[S. Limonard|AUTHOR S. Limonard]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[R.W.N.M. van Hout|AUTHOR R.W.N.M. van Hout]], [[Helmer Strik|AUTHOR Helmer Strik]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Tatsuya Kitamura|
|^&nbsp;|^Jianwu Dang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-1|PAPER Thu-1-5-1 — Discovering Articulatory Speech Targets from Synthesized Random Babble]]</div>|<div class="cpsessionviewpapertitle">Discovering Articulatory Speech Targets from Synthesized Random Babble</div><div class="cpsessionviewpaperauthor">[[Heikki Rasilo|AUTHOR Heikki Rasilo]], [[Yannick Jadoul|AUTHOR Yannick Jadoul]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0016.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-2|PAPER Thu-1-5-2 — Speaker Dependent Acoustic-to-Articulatory Inversion Using Real-Time MRI of the Vocal Tract]]</div>|<div class="cpsessionviewpapertitle">Speaker Dependent Acoustic-to-Articulatory Inversion Using Real-Time MRI of the Vocal Tract</div><div class="cpsessionviewpaperauthor">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-3|PAPER Thu-1-5-3 — Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet]]</div>|<div class="cpsessionviewpapertitle">Acoustic-to-Articulatory Inversion with Deep Autoregressive Articulatory-WaveNet</div><div class="cpsessionviewpaperauthor">[[Narjes Bozorg|AUTHOR Narjes Bozorg]], [[Michael T. Johnson|AUTHOR Michael T. Johnson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-4|PAPER Thu-1-5-4 — Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV]]</div>|<div class="cpsessionviewpapertitle">Using Silence MR Image to Synthesise Dynamic MRI Vocal Tract Data of CV</div><div class="cpsessionviewpaperauthor">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]], [[Ajinkya Kulkarni|AUTHOR Ajinkya Kulkarni]], [[Chrysanthi Dourou|AUTHOR Chrysanthi Dourou]], [[Yu Xie|AUTHOR Yu Xie]], [[Jacques Felblinger|AUTHOR Jacques Felblinger]], [[Karyna Isaieva|AUTHOR Karyna Isaieva]], [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]], [[Yves Laprie|AUTHOR Yves Laprie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1672.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-5|PAPER Thu-1-5-5 — Quantification of Transducer Misalignment in Ultrasound Tongue Imaging]]</div>|<div class="cpsessionviewpapertitle">Quantification of Transducer Misalignment in Ultrasound Tongue Imaging</div><div class="cpsessionviewpaperauthor">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]], [[Kele Xu|AUTHOR Kele Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-6|PAPER Thu-1-5-6 — Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction]]</div>|<div class="cpsessionviewpapertitle">Independent and Automatic Evaluation of Speaker-Independent Acoustic-to-Articulatory Reconstruction</div><div class="cpsessionviewpaperauthor">[[Maud Parrot|AUTHOR Maud Parrot]], [[Juliette Millet|AUTHOR Juliette Millet]], [[Ewan Dunbar|AUTHOR Ewan Dunbar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2859.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-7|PAPER Thu-1-5-7 — CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion]]</div>|<div class="cpsessionviewpapertitle">CSL-EMG_Array: An Open Access Corpus for EMG-to-Speech Conversion</div><div class="cpsessionviewpaperauthor">[[Lorenz Diener|AUTHOR Lorenz Diener]], [[Mehrdad Roustay Vishkasougheh|AUTHOR Mehrdad Roustay Vishkasougheh]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1175.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-5-8|PAPER Thu-1-5-8 — Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners]]</div>|<div class="cpsessionviewpapertitle">Links Between Production and Perception of Glottalisation in Individual Australian English Speaker/Listeners</div><div class="cpsessionviewpaperauthor">[[Joshua Penney|AUTHOR Joshua Penney]], [[Felicity Cox|AUTHOR Felicity Cox]], [[Anita Szakay|AUTHOR Anita Szakay]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Ming Li|
|^&nbsp;|^Yong Ma|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-1|PAPER Thu-1-7-1 — Dynamic Margin Softmax Loss for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Dynamic Margin Softmax Loss for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Dao Zhou|AUTHOR Dao Zhou]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Yibo Wu|AUTHOR Yibo Wu]], [[Meng Liu|AUTHOR Meng Liu]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Jianguo Wei|AUTHOR Jianguo Wei]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-2|PAPER Thu-1-7-2 — On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">On Parameter Adaptation in Softmax-Based Cross-Entropy Loss for Improved Convergence Speed and Accuracy in DNN-Based Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Magdalena Rybicka|AUTHOR Magdalena Rybicka]], [[Konrad Kowalczyk|AUTHOR Konrad Kowalczyk]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-3|PAPER Thu-1-7-3 — Training Speaker Enrollment Models by Network Optimization]]</div>|<div class="cpsessionviewpapertitle">Training Speaker Enrollment Models by Network Optimization</div><div class="cpsessionviewpaperauthor">[[Victoria Mingote|AUTHOR Victoria Mingote]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2342.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-4|PAPER Thu-1-7-4 — Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data]]</div>|<div class="cpsessionviewpapertitle">Supervised Domain Adaptation for Text-Independent Speaker Verification Using Limited Data</div><div class="cpsessionviewpaperauthor">[[Seyyed Saeed Sarfjoo|AUTHOR Seyyed Saeed Sarfjoo]], [[Srikanth Madikeri|AUTHOR Srikanth Madikeri]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Sébastien Marcel|AUTHOR Sébastien Marcel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2538.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-5|PAPER Thu-1-7-5 — Angular Margin Centroid Loss for Text-Independent Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Angular Margin Centroid Loss for Text-Independent Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Yuheng Wei|AUTHOR Yuheng Wei]], [[Junzhao Du|AUTHOR Junzhao Du]], [[Hui Liu|AUTHOR Hui Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2562.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-6|PAPER Thu-1-7-6 — Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning]]</div>|<div class="cpsessionviewpapertitle">Domain-Invariant Speaker Vector Projection by Model-Agnostic Meta-Learning</div><div class="cpsessionviewpaperauthor">[[Jiawen Kang|AUTHOR Jiawen Kang]], [[Ruiqi Liu|AUTHOR Ruiqi Liu]], [[Lantian Li|AUTHOR Lantian Li]], [[Yunqi Cai|AUTHOR Yunqi Cai]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2650.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-7|PAPER Thu-1-7-7 — ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Brecht Desplanques|AUTHOR Brecht Desplanques]], [[Jenthe Thienpondt|AUTHOR Jenthe Thienpondt]], [[Kris Demuynck|AUTHOR Kris Demuynck]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2872.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-7-8|PAPER Thu-1-7-8 — Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Length- and Noise-Aware Training Techniques for Short-Utterance Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Wenda Chen|AUTHOR Wenda Chen]], [[Jonathan Huang|AUTHOR Jonathan Huang]], [[Tobias Bocklet|AUTHOR Tobias Bocklet]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Sabato Marco Siniscalchi|
|^&nbsp;|^Linlin Wang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1852.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-1|PAPER Thu-1-8-1 — Spoken Language ‘Grammatical Error Correction’]]</div>|<div class="cpsessionviewpapertitle">Spoken Language ‘Grammatical Error Correction’</div><div class="cpsessionviewpaperauthor">[[Yiting Lu|AUTHOR Yiting Lu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Yu Wang|AUTHOR Yu Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-2|PAPER Thu-1-8-2 — Mixtures of Deep Neural Experts for Automated Speech Scoring]]</div>|<div class="cpsessionviewpapertitle">Mixtures of Deep Neural Experts for Automated Speech Scoring</div><div class="cpsessionviewpaperauthor">[[Sara Papi|AUTHOR Sara Papi]], [[Edmondo Trentin|AUTHOR Edmondo Trentin]], [[Roberto Gretter|AUTHOR Roberto Gretter]], [[Marco Matassoni|AUTHOR Marco Matassoni]], [[Daniele Falavigna|AUTHOR Daniele Falavigna]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1766.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-3|PAPER Thu-1-8-3 — Targeted Content Feedback in Spoken Language Learning and Assessment]]</div>|<div class="cpsessionviewpapertitle">Targeted Content Feedback in Spoken Language Learning and Assessment</div><div class="cpsessionviewpaperauthor">[[Xinhao Wang|AUTHOR Xinhao Wang]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Christopher Hamill|AUTHOR Christopher Hamill]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-4|PAPER Thu-1-8-4 — Universal Adversarial Attacks on Spoken Language Assessment Systems]]</div>|<div class="cpsessionviewpapertitle">Universal Adversarial Attacks on Spoken Language Assessment Systems</div><div class="cpsessionviewpaperauthor">[[Vyas Raina|AUTHOR Vyas Raina]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Kate M. Knill|AUTHOR Kate M. Knill]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-5|PAPER Thu-1-8-5 — Ensemble Approaches for Uncertainty in Spoken Language Assessment]]</div>|<div class="cpsessionviewpapertitle">Ensemble Approaches for Uncertainty in Spoken Language Assessment</div><div class="cpsessionviewpaperauthor">[[Xixin Wu|AUTHOR Xixin Wu]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Andrey Malinin|AUTHOR Andrey Malinin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2550.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-6|PAPER Thu-1-8-6 — Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing]]</div>|<div class="cpsessionviewpapertitle">Shadowability Annotation with Fine Granularity on L2 Utterances and its Improvement with Native Listeners’ Script-Shadowing</div><div class="cpsessionviewpaperauthor">[[Zhenchao Lin|AUTHOR Zhenchao Lin]], [[Ryo Takashima|AUTHOR Ryo Takashima]], [[Daisuke Saito|AUTHOR Daisuke Saito]], [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]], [[Noriko Nakanishi|AUTHOR Noriko Nakanishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2842.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-7|PAPER Thu-1-8-7 — ASR-Based Evaluation and Feedback for Individualized Reading Practice]]</div>|<div class="cpsessionviewpapertitle">ASR-Based Evaluation and Feedback for Individualized Reading Practice</div><div class="cpsessionviewpaperauthor">[[Yu Bai|AUTHOR Yu Bai]], [[Ferdy Hubers|AUTHOR Ferdy Hubers]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[Helmer Strik|AUTHOR Helmer Strik]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-8|PAPER Thu-1-8-8 — Domain Adversarial Neural Networks for Dysarthric Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Domain Adversarial Neural Networks for Dysarthric Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Dominika Woszczyk|AUTHOR Dominika Woszczyk]], [[Stavros Petridis|AUTHOR Stavros Petridis]], [[David Millard|AUTHOR David Millard]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-8-9|PAPER Thu-1-8-9 — Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram]]</div>|<div class="cpsessionviewpapertitle">Automatic Estimation of Pathological Voice Quality Based on Recurrent Neural Network Using Amplitude and Phase Spectrogram</div><div class="cpsessionviewpaperauthor">[[Shunsuke Hidaka|AUTHOR Shunsuke Hidaka]], [[Yogaku Lee|AUTHOR Yogaku Lee]], [[Kohei Wakamiya|AUTHOR Kohei Wakamiya]], [[Takashi Nakagawa|AUTHOR Takashi Nakagawa]], [[Tokihiko Kaburagi|AUTHOR Tokihiko Kaburagi]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Chloé Clavel|
|^&nbsp;|^Kristiina Jokinen|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1313.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-1|PAPER Thu-1-9-1 — Stochastic Curiosity Exploration for Dialogue Systems]]</div>|<div class="cpsessionviewpapertitle">Stochastic Curiosity Exploration for Dialogue Systems</div><div class="cpsessionviewpaperauthor">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Po-Chien Hsu|AUTHOR Po-Chien Hsu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-2|PAPER Thu-1-9-2 — Conditional Response Augmentation for Dialogue Using Knowledge Distillation]]</div>|<div class="cpsessionviewpapertitle">Conditional Response Augmentation for Dialogue Using Knowledge Distillation</div><div class="cpsessionviewpaperauthor">[[Myeongho Jeong|AUTHOR Myeongho Jeong]], [[Seungtaek Choi|AUTHOR Seungtaek Choi]], [[Hojae Han|AUTHOR Hojae Han]], [[Kyungho Kim|AUTHOR Kyungho Kim]], [[Seung-won Hwang|AUTHOR Seung-won Hwang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1865.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-3|PAPER Thu-1-9-3 — Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption]]</div>|<div class="cpsessionviewpapertitle">Prototypical Q Networks for Automatic Conversational Diagnosis and Few-Shot New Disease Adaption</div><div class="cpsessionviewpaperauthor">[[Hongyin Luo|AUTHOR Hongyin Luo]], [[Shang-Wen Li|AUTHOR Shang-Wen Li]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-4|PAPER Thu-1-9-4 — End-to-End Task-Oriented Dialog System Through Template Slot Value Generation]]</div>|<div class="cpsessionviewpapertitle">End-to-End Task-Oriented Dialog System Through Template Slot Value Generation</div><div class="cpsessionviewpaperauthor">[[Teakgyu Hong|AUTHOR Teakgyu Hong]], [[Oh-Woog Kwon|AUTHOR Oh-Woog Kwon]], [[Young-Kil Kim|AUTHOR Young-Kil Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-5|PAPER Thu-1-9-5 — Task-Oriented Dialog Generation with Enhanced Entity Representation]]</div>|<div class="cpsessionviewpapertitle">Task-Oriented Dialog Generation with Enhanced Entity Representation</div><div class="cpsessionviewpaperauthor">[[Zhenhao He|AUTHOR Zhenhao He]], [[Jiachun Wang|AUTHOR Jiachun Wang]], [[Jian Chen|AUTHOR Jian Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-6|PAPER Thu-1-9-6 — End-to-End Speech-to-Dialog-Act Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speech-to-Dialog-Act Recognition</div><div class="cpsessionviewpaperauthor">[[Viet-Trung Dang|AUTHOR Viet-Trung Dang]], [[Tianyu Zhao|AUTHOR Tianyu Zhao]], [[Sei Ueno|AUTHOR Sei Ueno]], [[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1962.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-7|PAPER Thu-1-9-7 — Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog]]</div>|<div class="cpsessionviewpapertitle">Discriminative Transfer Learning for Optimizing ASR and Semantic Labeling in Task-Oriented Spoken Dialog</div><div class="cpsessionviewpaperauthor">[[Yao Qian|AUTHOR Yao Qian]], [[Yu Shi|AUTHOR Yu Shi]], [[Michael Zeng|AUTHOR Michael Zeng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1341.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-1-9-8|PAPER Thu-1-9-8 — Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task]]</div>|<div class="cpsessionviewpapertitle">Datasets and Benchmarks for Task-Oriented Log Dialogue Ranking Task</div><div class="cpsessionviewpaperauthor">[[Xinnuo Xu|AUTHOR Xinnuo Xu]], [[Yizhe Zhang|AUTHOR Yizhe Zhang]], [[Lars Liden|AUTHOR Lars Liden]], [[Sungjin Lee|AUTHOR Sungjin Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Timo Gerkmann|
|^&nbsp;|^Yi Zhou|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1269.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-1|PAPER Thu-2-1-1 — Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions]]</div>|<div class="cpsessionviewpapertitle">Exploiting Conic Affinity Measures to Design Speech Enhancement Systems Operating in Unseen Noise Conditions</div><div class="cpsessionviewpaperauthor">[[Pavlos Papadopoulos|AUTHOR Pavlos Papadopoulos]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2500.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-2|PAPER Thu-2-1-2 — Adversarial Dictionary Learning for Monaural Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Adversarial Dictionary Learning for Monaural Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yunyun Ji|AUTHOR Yunyun Ji]], [[Longting Xu|AUTHOR Longting Xu]], [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2055.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-3|PAPER Thu-2-1-3 — Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Self-Produced Speech Enhancement and Suppression Based on Joint Source Modeling of Air- and Body-Conducted Signals Using Variational Autoencoder</div><div class="cpsessionviewpaperauthor">[[Shogo Seki|AUTHOR Shogo Seki]], [[Moe Takada|AUTHOR Moe Takada]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2224.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-4|PAPER Thu-2-1-4 — Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Spatial Covariance Matrix Estimation for Reverberant Speech with Application to Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Ran Weisman|AUTHOR Ran Weisman]], [[Vladimir Tourbabin|AUTHOR Vladimir Tourbabin]], [[Paul Calamia|AUTHOR Paul Calamia]], [[Boaz Rafaely|AUTHOR Boaz Rafaely]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-5|PAPER Thu-2-1-5 — A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Cross-Channel Attention-Based Wave-U-Net for Multi-Channel Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Minh Tri Ho|AUTHOR Minh Tri Ho]], [[Jinyoung Lee|AUTHOR Jinyoung Lee]], [[Bong-Ki Lee|AUTHOR Bong-Ki Lee]], [[Dong Hoon Yi|AUTHOR Dong Hoon Yi]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1864.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-6|PAPER Thu-2-1-6 — TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids]]</div>|<div class="cpsessionviewpapertitle">TinyLSTMs: Efficient Neural Speech Enhancement for Hearing Aids</div><div class="cpsessionviewpaperauthor">[[Igor Fedorov|AUTHOR Igor Fedorov]], [[Marko Stamenovic|AUTHOR Marko Stamenovic]], [[Carl Jensen|AUTHOR Carl Jensen]], [[Li-Chia Yang|AUTHOR Li-Chia Yang]], [[Ari Mandell|AUTHOR Ari Mandell]], [[Yiming Gan|AUTHOR Yiming Gan]], [[Matthew Mattina|AUTHOR Matthew Mattina]], [[Paul N. Whatmough|AUTHOR Paul N. Whatmough]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-7|PAPER Thu-2-1-7 — Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment]]</div>|<div class="cpsessionviewpapertitle">Intelligibility Enhancement Based on Speech Waveform Modification Using Hearing Impairment</div><div class="cpsessionviewpaperauthor">[[Shu Hikosaka|AUTHOR Shu Hikosaka]], [[Shogo Seki|AUTHOR Shogo Seki]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Kazuya Takeda|AUTHOR Kazuya Takeda]], [[Hideki Banno|AUTHOR Hideki Banno]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1994.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-8|PAPER Thu-2-1-8 — Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network]]</div>|<div class="cpsessionviewpapertitle">Speaker and Phoneme-Aware Speech Bandwidth Extension with Residual Dual-Path Network</div><div class="cpsessionviewpaperauthor">[[Nana Hou|AUTHOR Nana Hou]], [[Chenglin Xu|AUTHOR Chenglin Xu]], [[Van Tung Pham|AUTHOR Van Tung Pham]], [[Joey Tianyi Zhou|AUTHOR Joey Tianyi Zhou]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2022.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-9|PAPER Thu-2-1-9 — Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Learning for End-to-End Noise-Robust Bandwidth Extension</div><div class="cpsessionviewpaperauthor">[[Nana Hou|AUTHOR Nana Hou]], [[Chenglin Xu|AUTHOR Chenglin Xu]], [[Joey Tianyi Zhou|AUTHOR Joey Tianyi Zhou]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-1-10|PAPER Thu-2-1-10 — Phase-Aware Music Super-Resolution Using Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Phase-Aware Music Super-Resolution Using Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Shichao Hu|AUTHOR Shichao Hu]], [[Bin Zhang|AUTHOR Bin Zhang]], [[Beici Liang|AUTHOR Beici Liang]], [[Ethan Zhao|AUTHOR Ethan Zhao]], [[Simon Lui|AUTHOR Simon Lui]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 10|<|
|^Chairs:&nbsp;|^David Escudero|
|^&nbsp;|^Yao Qian|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-1|PAPER Thu-2-10-1 — Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern]]</div>|<div class="cpsessionviewpapertitle">Mobile-Assisted Prosody Training for Limited English Proficiency: Learner Background and Speech Learning Pattern</div><div class="cpsessionviewpaperauthor">[[Kevin Hirschi|AUTHOR Kevin Hirschi]], [[Okim Kang|AUTHOR Okim Kang]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Helmer Strik|AUTHOR Helmer Strik]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-2|PAPER Thu-2-10-2 — Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis]]</div>|<div class="cpsessionviewpapertitle">Finding Intelligible Consonant-Vowel Sounds Using High-Quality Articulatory Synthesis</div><div class="cpsessionviewpaperauthor">[[Daniel R. van Niekerk|AUTHOR Daniel R. van Niekerk]], [[Anqi Xu|AUTHOR Anqi Xu]], [[Branislav Gerazov|AUTHOR Branislav Gerazov]], [[Paul K. Krug|AUTHOR Paul K. Krug]], [[Peter Birkholz|AUTHOR Peter Birkholz]], [[Yi Xu|AUTHOR Yi Xu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2674.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-3|PAPER Thu-2-10-3 — Audiovisual Correspondence Learning in Humans and Machines]]</div>|<div class="cpsessionviewpapertitle">Audiovisual Correspondence Learning in Humans and Machines</div><div class="cpsessionviewpaperauthor">[[Venkat Krishnamohan|AUTHOR Venkat Krishnamohan]], [[Akshara Soman|AUTHOR Akshara Soman]], [[Anshul Gupta|AUTHOR Anshul Gupta]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1120.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-4|PAPER Thu-2-10-4 — Perception of English Fricatives and Affricates by Advanced Chinese Learners of English]]</div>|<div class="cpsessionviewpapertitle">Perception of English Fricatives and Affricates by Advanced Chinese Learners of English</div><div class="cpsessionviewpaperauthor">[[Yizhou Lan|AUTHOR Yizhou Lan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1068.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-5|PAPER Thu-2-10-5 — Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience]]</div>|<div class="cpsessionviewpapertitle">Perception of Japanese Consonant Length by Native Speakers of Korean Differing in Japanese Learning Experience</div><div class="cpsessionviewpaperauthor">[[Kimiko Tsukada|AUTHOR Kimiko Tsukada]], [[Joo-Yeon Kim|AUTHOR Joo-Yeon Kim]], [[Jeong-Im Han|AUTHOR Jeong-Im Han]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2145.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-6|PAPER Thu-2-10-6 — Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Phonological Errors in Child Speech Using Siamese Recurrent Autoencoder</div><div class="cpsessionviewpaperauthor">[[Si-Ioi Ng|AUTHOR Si-Ioi Ng]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-7|PAPER Thu-2-10-7 — A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners]]</div>|<div class="cpsessionviewpapertitle">A Comparison of English Rhythm Produced by Native American Speakers and Mandarin ESL Primary School Learners</div><div class="cpsessionviewpaperauthor">[[Hongwei Ding|AUTHOR Hongwei Ding]], [[Binghuai Lin|AUTHOR Binghuai Lin]], [[Liyuan Wang|AUTHOR Liyuan Wang]], [[Hui Wang|AUTHOR Hui Wang]], [[Ruomei Fang|AUTHOR Ruomei Fang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2689.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-8|PAPER Thu-2-10-8 — Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners]]</div>|<div class="cpsessionviewpapertitle">Cross-Linguistic Interaction Between Phonological Categorization and Orthography Predicts Prosodic Effects in the Acquisition of Portuguese Liquids by L1-Mandarin Learners</div><div class="cpsessionviewpaperauthor">[[Chao Zhou|AUTHOR Chao Zhou]], [[Silke Hamann|AUTHOR Silke Hamann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1640.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-10-9|PAPER Thu-2-10-9 — Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners]]</div>|<div class="cpsessionviewpapertitle">Cross-Linguistic Perception of Utterances with Willingness and Reluctance in Mandarin by Korean L2 Learners</div><div class="cpsessionviewpaperauthor">[[Wenqian Li|AUTHOR Wenqian Li]], [[Jung-Yueh Tu|AUTHOR Jung-Yueh Tu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Yuma Koizumi|
|^&nbsp;|^Jonathan Le Roux|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0990.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-1|PAPER Thu-2-11-1 — Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement Based on Beamforming and Post-Filtering by Combining Phase Information</div><div class="cpsessionviewpaperauthor">[[Rui Cheng|AUTHOR Rui Cheng]], [[Changchun Bao|AUTHOR Changchun Bao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-2|PAPER Thu-2-11-2 — A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Noise-Aware Memory-Attention Network Architecture for Regression-Based Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yu-Xuan Wang|AUTHOR Yu-Xuan Wang]], [[Jun Du|AUTHOR Jun Du]], [[Li Chai|AUTHOR Li Chai]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]], [[Jia Pan|AUTHOR Jia Pan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2143.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-3|PAPER Thu-2-11-3 — HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">HiFi-GAN: High-Fidelity Denoising and Dereverberation Based on Speech Deep Features in Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Jiaqi Su|AUTHOR Jiaqi Su]], [[Zeyu Jin|AUTHOR Zeyu Jin]], [[Adam Finkelstein|AUTHOR Adam Finkelstein]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2561.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-4|PAPER Thu-2-11-4 — Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization]]</div>|<div class="cpsessionviewpapertitle">Learning Complex Spectral Mapping for Speech Enhancement with Improved Cross-Corpus Generalization</div><div class="cpsessionviewpaperauthor">[[Ashutosh Pandey|AUTHOR Ashutosh Pandey]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2588.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-5|PAPER Thu-2-11-5 — Speech Enhancement with Stochastic Temporal Convolutional Networks]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement with Stochastic Temporal Convolutional Networks</div><div class="cpsessionviewpaperauthor">[[Julius Richter|AUTHOR Julius Richter]], [[Guillaume Carbajal|AUTHOR Guillaume Carbajal]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2935.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-6|PAPER Thu-2-11-6 — Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System]]</div>|<div class="cpsessionviewpapertitle">Visual Speech In Real Noisy Environments (VISION): A Novel Benchmark Dataset and Deep Learning-Based Baseline System</div><div class="cpsessionviewpaperauthor">[[Mandar Gogate|AUTHOR Mandar Gogate]], [[Kia Dashtipour|AUTHOR Kia Dashtipour]], [[Amir Hussain|AUTHOR Amir Hussain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2989.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-7|PAPER Thu-2-11-7 — Sparse Mixture of Local Experts for Efficient Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Sparse Mixture of Local Experts for Efficient Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Aswin Sivaraman|AUTHOR Aswin Sivaraman]], [[Minje Kim|AUTHOR Minje Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3122.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-8|PAPER Thu-2-11-8 — Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers]]</div>|<div class="cpsessionviewpapertitle">Improved Speech Enhancement Using TCN with Multiple Encoder-Decoder Layers</div><div class="cpsessionviewpaperauthor">[[Vinith Kishore|AUTHOR Vinith Kishore]], [[Nitya Tiwari|AUTHOR Nitya Tiwari]], [[Periyasamy Paramasivam|AUTHOR Periyasamy Paramasivam]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1225.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-9|PAPER Thu-2-11-9 — Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations]]</div>|<div class="cpsessionviewpapertitle">Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations</div><div class="cpsessionviewpaperauthor">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-11-10|PAPER Thu-2-11-10 — Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Robust Speech Enhancement Based on Alpha-Stable Fast Multichannel Nonnegative Matrix Factorization</div><div class="cpsessionviewpaperauthor">[[Mathieu Fontaine|AUTHOR Mathieu Fontaine]], [[Kouhei Sekiguchi|AUTHOR Kouhei Sekiguchi]], [[Aditya Arie Nugraha|AUTHOR Aditya Arie Nugraha]], [[Kazuyoshi Yoshii|AUTHOR Kazuyoshi Yoshii]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Jing Han|
|^&nbsp;|^Shuo Liu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1391.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-1|PAPER Thu-2-2-1 — Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Jian Huang|AUTHOR Jian Huang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Zheng Lian|AUTHOR Zheng Lian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3005.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-2|PAPER Thu-2-2-2 — Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Removing Bias with Residual Mixture of Multi-View Attention for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]], [[Rosanna Milner|AUTHOR Rosanna Milner]], [[Thomas Hain|AUTHOR Thomas Hain]], [[Roger K. Moore|AUTHOR Roger K. Moore]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2572.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-3|PAPER Thu-2-2-3 — Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Adaptive Domain-Aware Representation Learning for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Weiquan Fan|AUTHOR Weiquan Fan]], [[Xiangmin Xu|AUTHOR Xiangmin Xu]], [[Xiaofen Xing|AUTHOR Xiaofen Xing]], [[Dongyan Huang|AUTHOR Dongyan Huang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2237.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-4|PAPER Thu-2-2-4 — Speech Emotion Recognition with Discriminative Feature Learning]]</div>|<div class="cpsessionviewpapertitle">Speech Emotion Recognition with Discriminative Feature Learning</div><div class="cpsessionviewpaperauthor">[[Huan Zhou|AUTHOR Huan Zhou]], [[Kai Liu|AUTHOR Kai Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2472.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-5|PAPER Thu-2-2-5 — Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions]]</div>|<div class="cpsessionviewpapertitle">Using Speech Enhancement Preprocessing for Speech Emotion Recognition in Realistic Noisy Conditions</div><div class="cpsessionviewpaperauthor">[[Hengshun Zhou|AUTHOR Hengshun Zhou]], [[Jun Du|AUTHOR Jun Du]], [[Yan-Hui Tu|AUTHOR Yan-Hui Tu]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1536.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-6|PAPER Thu-2-2-6 — Comparison of Glottal Source Parameter Values in Emotional Vowels]]</div>|<div class="cpsessionviewpapertitle">Comparison of Glottal Source Parameter Values in Emotional Vowels</div><div class="cpsessionviewpaperauthor">[[Yongwei Li|AUTHOR Yongwei Li]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Donna Erickson|AUTHOR Donna Erickson]], [[Masato Akagi|AUTHOR Masato Akagi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1714.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-7|PAPER Thu-2-2-7 — Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels]]</div>|<div class="cpsessionviewpapertitle">Learning to Recognize Per-Rater’s Emotion Perception Using Co-Rater Training Strategy with Soft and Hard Labels</div><div class="cpsessionviewpaperauthor">[[Huang-Cheng Chou|AUTHOR Huang-Cheng Chou]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-2-8|PAPER Thu-2-2-8 — Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Empirical Interpretation of Speech Emotion Perception with Attention Based Model for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]], [[Rosanna Milner|AUTHOR Rosanna Milner]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Oliver Niebuhr|
|^&nbsp;|^Jing Wang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2701.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-1|PAPER Thu-2-3-1 — Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella]]</div>|<div class="cpsessionviewpapertitle">Phonetic Accommodation of L2 German Speakers to the Virtual Language Learning Tutor Mirabella</div><div class="cpsessionviewpaperauthor">[[Iona Gessinger|AUTHOR Iona Gessinger]], [[Bernd Möbius|AUTHOR Bernd Möbius]], [[Bistra Andreeva|AUTHOR Bistra Andreeva]], [[Eran Raveh|AUTHOR Eran Raveh]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3166.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-2|PAPER Thu-2-3-2 — Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis]]</div>|<div class="cpsessionviewpapertitle">Characterization of Singaporean Children’s English: Comparisons to American and British Counterparts Using Archetypal Analysis</div><div class="cpsessionviewpaperauthor">[[Yuling Gu|AUTHOR Yuling Gu]], [[Nancy F. Chen|AUTHOR Nancy F. Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2963.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-3|PAPER Thu-2-3-3 — Rhythmic Convergence in Canadian French Varieties?]]</div>|<div class="cpsessionviewpapertitle">Rhythmic Convergence in Canadian French Varieties?</div><div class="cpsessionviewpaperauthor">[[Svetlana Kaminskaïa|AUTHOR Svetlana Kaminskaïa]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1936.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-4|PAPER Thu-2-3-4 — Malayalam-English Code-Switched: Grapheme to Phoneme System]]</div>|<div class="cpsessionviewpapertitle">Malayalam-English Code-Switched: Grapheme to Phoneme System</div><div class="cpsessionviewpaperauthor">[[Sreeja Manghat|AUTHOR Sreeja Manghat]], [[Sreeram Manghat|AUTHOR Sreeram Manghat]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1460.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-5|PAPER Thu-2-3-5 — Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French]]</div>|<div class="cpsessionviewpapertitle">Ongoing Phonologization of Word-Final Voicing Alternations in Two Romance Languages: Romanian and French</div><div class="cpsessionviewpaperauthor">[[Mathilde Hutin|AUTHOR Mathilde Hutin]], [[Adèle Jatteau|AUTHOR Adèle Jatteau]], [[Ioana Vasilescu|AUTHOR Ioana Vasilescu]], [[Lori Lamel|AUTHOR Lori Lamel]], [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-6|PAPER Thu-2-3-6 — Cues for Perception of Gender in Synthetic Voices and the Role of Identity]]</div>|<div class="cpsessionviewpapertitle">Cues for Perception of Gender in Synthetic Voices and the Role of Identity</div><div class="cpsessionviewpaperauthor">[[Maxwell Hope|AUTHOR Maxwell Hope]], [[Jason Lilley|AUTHOR Jason Lilley]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2696.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-7|PAPER Thu-2-3-7 — Phonetic Entrainment in Cooperative Dialogues: A Case of Russian]]</div>|<div class="cpsessionviewpapertitle">Phonetic Entrainment in Cooperative Dialogues: A Case of Russian</div><div class="cpsessionviewpaperauthor">[[Alla Menshikova|AUTHOR Alla Menshikova]], [[Daniil Kocharov|AUTHOR Daniil Kocharov]], [[Tatiana Kachkovskaia|AUTHOR Tatiana Kachkovskaia]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-8|PAPER Thu-2-3-8 — Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances]]</div>|<div class="cpsessionviewpapertitle">Prosodic Characteristics of Genuine and Mock (Im)polite Mandarin Utterances</div><div class="cpsessionviewpaperauthor">[[Chengwei Xu|AUTHOR Chengwei Xu]], [[Wentao Gu|AUTHOR Wentao Gu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1235.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-9|PAPER Thu-2-3-9 — Tone Variations in Regionally Accented Mandarin]]</div>|<div class="cpsessionviewpapertitle">Tone Variations in Regionally Accented Mandarin</div><div class="cpsessionviewpaperauthor">[[Yanping Li|AUTHOR Yanping Li]], [[Catherine T. Best|AUTHOR Catherine T. Best]], [[Michael D. Tyler|AUTHOR Michael D. Tyler]], [[Denis Burnham|AUTHOR Denis Burnham]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2549.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-3-10|PAPER Thu-2-3-10 — F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers]]</div>|<div class="cpsessionviewpapertitle">F0 Patterns in Mandarin Statements of Mandarin and Cantonese Speakers</div><div class="cpsessionviewpaperauthor">[[Yike Yang|AUTHOR Yike Yang]], [[Si Chen|AUTHOR Si Chen]], [[Xi Chen|AUTHOR Xi Chen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Jen-Tzung Chien|
|^&nbsp;|^Laurent Besacier|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1570.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-1|PAPER Thu-2-4-1 — SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering]]</div>|<div class="cpsessionviewpapertitle">SpeechBERT: An Audio-and-Text Jointly Learned Language Model for End-to-End Spoken Question Answering</div><div class="cpsessionviewpaperauthor">[[Yung-Sung Chuang|AUTHOR Yung-Sung Chuang]], [[Chi-Liang Liu|AUTHOR Chi-Liang Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-2|PAPER Thu-2-4-2 — An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering]]</div>|<div class="cpsessionviewpapertitle">An Audio-Enriched BERT-Based Framework for Spoken Multiple-Choice Question Answering</div><div class="cpsessionviewpaperauthor">[[Chia-Chih Kuo|AUTHOR Chia-Chih Kuo]], [[Shang-Bao Luo|AUTHOR Shang-Bao Luo]], [[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1934.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-3|PAPER Thu-2-4-3 — Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching]]</div>|<div class="cpsessionviewpapertitle">Entity Linking for Short Text Using Structured Knowledge Graph via Multi-Grained Text Matching</div><div class="cpsessionviewpaperauthor">[[Binxuan Huang|AUTHOR Binxuan Huang]], [[Han Wang|AUTHOR Han Wang]], [[Tong Wang|AUTHOR Tong Wang]], [[Yue Liu|AUTHOR Yue Liu]], [[Yang Liu|AUTHOR Yang Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-4|PAPER Thu-2-4-4 — Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition]]</div>|<div class="cpsessionviewpapertitle">Sound-Image Grounding Based Focusing Mechanism for Efficient Automatic Spoken Language Acquisition</div><div class="cpsessionviewpaperauthor">[[Mingxin Zhang|AUTHOR Mingxin Zhang]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Wenxin Hou|AUTHOR Wenxin Hou]], [[Shengzhou Gao|AUTHOR Shengzhou Gao]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2293.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-5|PAPER Thu-2-4-5 — Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Learning for Character Expression of Spoken Dialogue Systems</div><div class="cpsessionviewpaperauthor">[[Kenta Yamamoto|AUTHOR Kenta Yamamoto]], [[Koji Inoue|AUTHOR Koji Inoue]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1820.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-6|PAPER Thu-2-4-6 — Dimensional Emotion Prediction Based on Interactive Context in Conversation]]</div>|<div class="cpsessionviewpapertitle">Dimensional Emotion Prediction Based on Interactive Context in Conversation</div><div class="cpsessionviewpaperauthor">[[Xiaohan Shi|AUTHOR Xiaohan Shi]], [[Sixia Li|AUTHOR Sixia Li]], [[Jianwu Dang|AUTHOR Jianwu Dang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-7|PAPER Thu-2-4-7 — HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection]]</div>|<div class="cpsessionviewpapertitle">HRI-RNN: A User-Robot Dynamics-Oriented RNN for Engagement Decrease Detection</div><div class="cpsessionviewpaperauthor">[[Asma Atamna|AUTHOR Asma Atamna]], [[Chloé Clavel|AUTHOR Chloé Clavel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-8|PAPER Thu-2-4-8 — Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction]]</div>|<div class="cpsessionviewpapertitle">Neural Representations of Dialogical History for Improving Upcoming Turn Acoustic Parameters Prediction</div><div class="cpsessionviewpaperauthor">[[Simone Fuscone|AUTHOR Simone Fuscone]], [[Benoit Favre|AUTHOR Benoit Favre]], [[Laurent Prévot|AUTHOR Laurent Prévot]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1518.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-4-9|PAPER Thu-2-4-9 — Detecting Domain-Specific Credibility and Expertise in Text and Speech]]</div>|<div class="cpsessionviewpapertitle">Detecting Domain-Specific Credibility and Expertise in Text and Speech</div><div class="cpsessionviewpaperauthor">[[Shengli Hu|AUTHOR Shengli Hu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Mengyue Wu|
|^&nbsp;|^Jiajun Zhang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-1|PAPER Thu-2-6-1 — Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Multimodal Emotion Recognition Using Cross-Modal Attention and 1D Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Krishna D. N.|AUTHOR Krishna D. N.]], [[Ankita Patil|AUTHOR Ankita Patil]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-2|PAPER Thu-2-6-2 — Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization]]</div>|<div class="cpsessionviewpapertitle">Abstractive Spoken Document Summarization Using Hierarchical Model with Multi-Stage Attention Diversity Optimization</div><div class="cpsessionviewpaperauthor">[[Potsawee Manakul|AUTHOR Potsawee Manakul]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Linlin Wang|AUTHOR Linlin Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1702.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-3|PAPER Thu-2-6-3 — Improved Learning of Word Embeddings with Word Definitions and Semantic Injection]]</div>|<div class="cpsessionviewpapertitle">Improved Learning of Word Embeddings with Word Definitions and Semantic Injection</div><div class="cpsessionviewpaperauthor">[[Yichi Zhang|AUTHOR Yichi Zhang]], [[Yinpei Dai|AUTHOR Yinpei Dai]], [[Zhijian Ou|AUTHOR Zhijian Ou]], [[Huixin Wang|AUTHOR Huixin Wang]], [[Junlan Feng|AUTHOR Junlan Feng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1811.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-4|PAPER Thu-2-6-4 — Wake Word Detection with Alignment-Free Lattice-Free MMI]]</div>|<div class="cpsessionviewpapertitle">Wake Word Detection with Alignment-Free Lattice-Free MMI</div><div class="cpsessionviewpaperauthor">[[Yiming Wang|AUTHOR Yiming Wang]], [[Hang Lv|AUTHOR Hang Lv]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Lei Xie|AUTHOR Lei Xie]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1896.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-5|PAPER Thu-2-6-5 — Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models]]</div>|<div class="cpsessionviewpapertitle">Improving Vietnamese Named Entity Recognition from Speech Using Word Capitalization and Punctuation Recovery Models</div><div class="cpsessionviewpaperauthor">[[Thai Binh Nguyen|AUTHOR Thai Binh Nguyen]], [[Quang Minh Nguyen|AUTHOR Quang Minh Nguyen]], [[Thi Thu Hien Nguyen|AUTHOR Thi Thu Hien Nguyen]], [[Quoc Truong Do|AUTHOR Quoc Truong Do]], [[Chi Mai Luong|AUTHOR Chi Mai Luong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2482.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-6|PAPER Thu-2-6-6 — End-to-End Named Entity Recognition from English Speech]]</div>|<div class="cpsessionviewpapertitle">End-to-End Named Entity Recognition from English Speech</div><div class="cpsessionviewpaperauthor">[[Hemant Yadav|AUTHOR Hemant Yadav]], [[Sreyan Ghosh|AUTHOR Sreyan Ghosh]], [[Yi Yu|AUTHOR Yi Yu]], [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2929.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-7|PAPER Thu-2-6-7 — Semantic Complexity in End-to-End Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Semantic Complexity in End-to-End Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Joseph P. McKenna|AUTHOR Joseph P. McKenna]], [[Samridhi Choudhary|AUTHOR Samridhi Choudhary]], [[Michael Saxon|AUTHOR Michael Saxon]], [[Grant P. Strimel|AUTHOR Grant P. Strimel]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3037.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-8|PAPER Thu-2-6-8 — Analysis of Disfluency in Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">Analysis of Disfluency in Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Trang Tran|AUTHOR Trang Tran]], [[Morgan Tinkler|AUTHOR Morgan Tinkler]], [[Gary Yeung|AUTHOR Gary Yeung]], [[Abeer Alwan|AUTHOR Abeer Alwan]], [[Mari Ostendorf|AUTHOR Mari Ostendorf]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-9|PAPER Thu-2-6-9 — Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition]]</div>|<div class="cpsessionviewpapertitle">Representation Based Meta-Learning for Few-Shot Spoken Intent Recognition</div><div class="cpsessionviewpaperauthor">[[Ashish Mittal|AUTHOR Ashish Mittal]], [[Samarth Bharadwaj|AUTHOR Samarth Bharadwaj]], [[Shreya Khare|AUTHOR Shreya Khare]], [[Saneem Chemmengath|AUTHOR Saneem Chemmengath]], [[Karthik Sankaranarayanan|AUTHOR Karthik Sankaranarayanan]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3238.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-6-10|PAPER Thu-2-6-10 — Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation]]</div>|<div class="cpsessionviewpapertitle">Complementary Language Model and Parallel Bi-LRNN for False Trigger Mitigation</div><div class="cpsessionviewpaperauthor">[[Rishika Agarwal|AUTHOR Rishika Agarwal]], [[Xiaochuan Niu|AUTHOR Xiaochuan Niu]], [[Pranay Dighe|AUTHOR Pranay Dighe]], [[Srikanth Vishnubhotla|AUTHOR Srikanth Vishnubhotla]], [[Sameer Badaskar|AUTHOR Sameer Badaskar]], [[Devang Naik|AUTHOR Devang Naik]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Haizhou Li|
|^&nbsp;|^Liang He|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-1|PAPER Thu-2-7-1 — Speaker-Utterance Dual Attention for Speaker and Utterance Verification]]</div>|<div class="cpsessionviewpapertitle">Speaker-Utterance Dual Attention for Speaker and Utterance Verification</div><div class="cpsessionviewpaperauthor">[[Tianchi Liu|AUTHOR Tianchi Liu]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Maulik Madhavi|AUTHOR Maulik Madhavi]], [[Shengmei Shen|AUTHOR Shengmei Shen]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2372.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-2|PAPER Thu-2-7-2 — Adversarial Separation and Adaptation Network for Far-Field Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Adversarial Separation and Adaptation Network for Far-Field Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Lu Yi|AUTHOR Lu Yi]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2076.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-3|PAPER Thu-2-7-3 — MIRNet: Learning Multiple Identities Representations in Overlapped Speech]]</div>|<div class="cpsessionviewpapertitle">MIRNet: Learning Multiple Identities Representations in Overlapped Speech</div><div class="cpsessionviewpaperauthor">[[Hyewon Han|AUTHOR Hyewon Han]], [[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2092.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-4|PAPER Thu-2-7-4 — Strategies for End-to-End Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Strategies for End-to-End Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Weiwei Lin|AUTHOR Weiwei Lin]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]], [[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-5|PAPER Thu-2-7-5 — Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data]]</div>|<div class="cpsessionviewpapertitle">Why Did the x-Vector System Miss a Target Speaker? Impact of Acoustic Mismatch Upon Target Score on VoxCeleb Data</div><div class="cpsessionviewpaperauthor">[[Rosa González Hautamäki|AUTHOR Rosa González Hautamäki]], [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3006.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-6|PAPER Thu-2-7-6 — Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Variable Frame Rate-Based Data Augmentation to Handle Speaking-Style Variability for Automatic Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Amber Afshan|AUTHOR Amber Afshan]], [[Jinxi Guo|AUTHOR Jinxi Guo]], [[Soo Jin Park|AUTHOR Soo Jin Park]], [[Vijay Ravi|AUTHOR Vijay Ravi]], [[Alan McCree|AUTHOR Alan McCree]], [[Abeer Alwan|AUTHOR Abeer Alwan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2892.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-7|PAPER Thu-2-7-7 — A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning]]</div>|<div class="cpsessionviewpapertitle">A Machine of Few Words: Interactive Speaker Recognition with Reinforcement Learning</div><div class="cpsessionviewpaperauthor">[[Mathieu Seurin|AUTHOR Mathieu Seurin]], [[Florian Strub|AUTHOR Florian Strub]], [[Philippe Preux|AUTHOR Philippe Preux]], [[Olivier Pietquin|AUTHOR Olivier Pietquin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2944.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-8|PAPER Thu-2-7-8 — Improving On-Device Speaker Verification Using Federated Learning with Privacy]]</div>|<div class="cpsessionviewpapertitle">Improving On-Device Speaker Verification Using Federated Learning with Privacy</div><div class="cpsessionviewpaperauthor">[[Filip Granqvist|AUTHOR Filip Granqvist]], [[Matt Seigel|AUTHOR Matt Seigel]], [[Rogier van Dalen|AUTHOR Rogier van Dalen]], [[Áine Cahill|AUTHOR Áine Cahill]], [[Stephen Shum|AUTHOR Stephen Shum]], [[Matthias Paulik|AUTHOR Matthias Paulik]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2699.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-7-9|PAPER Thu-2-7-9 — Neural PLDA Modeling for End-to-End Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Neural PLDA Modeling for End-to-End Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Shreyas Ramoji|AUTHOR Shreyas Ramoji]], [[Prashant Krishnan|AUTHOR Prashant Krishnan]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 8|<|
|^Chairs:&nbsp;|^William Hartmann|
|^&nbsp;|^Wei Xue|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2722.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-1|PAPER Thu-2-8-1 — State Sequence Pooling Training of Acoustic Models for Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">State Sequence Pooling Training of Acoustic Models for Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Kuba Łopatka|AUTHOR Kuba Łopatka]], [[Tobias Bocklet|AUTHOR Tobias Bocklet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-2|PAPER Thu-2-8-2 — Training Keyword Spotting Models on Non-IID Data with Federated Learning]]</div>|<div class="cpsessionviewpapertitle">Training Keyword Spotting Models on Non-IID Data with Federated Learning</div><div class="cpsessionviewpaperauthor">[[Andrew Hard|AUTHOR Andrew Hard]], [[Kurt Partridge|AUTHOR Kurt Partridge]], [[Cameron Nguyen|AUTHOR Cameron Nguyen]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Aishanee Shah|AUTHOR Aishanee Shah]], [[Pai Zhu|AUTHOR Pai Zhu]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Rajiv Mathews|AUTHOR Rajiv Mathews]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-3|PAPER Thu-2-8-3 — Class LM and Word Mapping for Contextual Biasing in End-to-End ASR]]</div>|<div class="cpsessionviewpapertitle">Class LM and Word Mapping for Contextual Biasing in End-to-End ASR</div><div class="cpsessionviewpaperauthor">[[Rongqing Huang|AUTHOR Rongqing Huang]], [[Ossama Abdel-hamid|AUTHOR Ossama Abdel-hamid]], [[Xinwei Li|AUTHOR Xinwei Li]], [[Gunnar Evermann|AUTHOR Gunnar Evermann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1750.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-4|PAPER Thu-2-8-4 — Do End-to-End Speech Recognition Models Care About Context?]]</div>|<div class="cpsessionviewpapertitle">Do End-to-End Speech Recognition Models Care About Context?</div><div class="cpsessionviewpaperauthor">[[Lasse Borgholt|AUTHOR Lasse Borgholt]], [[Jakob D. Havtorn|AUTHOR Jakob D. Havtorn]], [[Željko Agić|AUTHOR Željko Agić]], [[Anders Søgaard|AUTHOR Anders Søgaard]], [[Lars Maaløe|AUTHOR Lars Maaløe]], [[Christian Igel|AUTHOR Christian Igel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-5|PAPER Thu-2-8-5 — Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios]]</div>|<div class="cpsessionviewpapertitle">Utterance Confidence Measure for End-to-End Speech Recognition with Applications to Distributed Speech Recognition Scenarios</div><div class="cpsessionviewpaperauthor">[[Ankur Kumar|AUTHOR Ankur Kumar]], [[Sachin Singh|AUTHOR Sachin Singh]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Shatrughan Singh|AUTHOR Shatrughan Singh]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2296.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-6|PAPER Thu-2-8-6 — Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning]]</div>|<div class="cpsessionviewpapertitle">Speaker Code Based Speaker Adaptive Training Using Model Agnostic Meta-Learning</div><div class="cpsessionviewpaperauthor">[[Huaxin Wu|AUTHOR Huaxin Wu]], [[Genshun Wan|AUTHOR Genshun Wan]], [[Jia Pan|AUTHOR Jia Pan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-7|PAPER Thu-2-8-7 — Domain Adaptation Using Class Similarity for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Domain Adaptation Using Class Similarity for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Han Zhu|AUTHOR Han Zhu]], [[Jiangjiang Zhao|AUTHOR Jiangjiang Zhao]], [[Yuling Ren|AUTHOR Yuling Ren]], [[Li Wang|AUTHOR Li Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-8|PAPER Thu-2-8-8 — Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time]]</div>|<div class="cpsessionviewpapertitle">Incremental Machine Speech Chain Towards Enabling Listening While Speaking in Real-Time</div><div class="cpsessionviewpaperauthor">[[Sashi Novitasari|AUTHOR Sashi Novitasari]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Tomoya Yanagita|AUTHOR Tomoya Yanagita]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1244.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-9|PAPER Thu-2-8-9 — Context-Dependent Acoustic Modeling Without Explicit Phone Clustering]]</div>|<div class="cpsessionviewpapertitle">Context-Dependent Acoustic Modeling Without Explicit Phone Clustering</div><div class="cpsessionviewpaperauthor">[[Tina Raissi|AUTHOR Tina Raissi]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1112.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-8-10|PAPER Thu-2-8-10 — Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario]]</div>|<div class="cpsessionviewpapertitle">Voice Conversion Based Data Augmentation to Improve Children’s Speech Recognition in Limited Data Scenario</div><div class="cpsessionviewpaperauthor">[[S. Shahnawazuddin|AUTHOR S. Shahnawazuddin]], [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]], [[Kunal Kumar|AUTHOR Kunal Kumar]], [[Aayushi Poddar|AUTHOR Aayushi Poddar]], [[Waquar Ahmad|AUTHOR Waquar Ahmad]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Yi Xu|
|^&nbsp;|^Keikichi Hirose|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1251.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-1|PAPER Thu-2-9-1 — CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech]]</div>|<div class="cpsessionviewpapertitle">CopyCat: Many-to-Many Fine-Grained Prosody Transfer for Neural Text-to-Speech</div><div class="cpsessionviewpaperauthor">[[Sri Karlapati|AUTHOR Sri Karlapati]], [[Alexis Moinet|AUTHOR Alexis Moinet]], [[Arnaud Joly|AUTHOR Arnaud Joly]], [[Viacheslav Klimkov|AUTHOR Viacheslav Klimkov]], [[Daniel Sáez-Trigueros|AUTHOR Daniel Sáez-Trigueros]], [[Thomas Drugman|AUTHOR Thomas Drugman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1284.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-2|PAPER Thu-2-9-2 — Joint Detection of Sentence Stress and Phrase Boundary for Prosody]]</div>|<div class="cpsessionviewpapertitle">Joint Detection of Sentence Stress and Phrase Boundary for Prosody</div><div class="cpsessionviewpaperauthor">[[Binghuai Lin|AUTHOR Binghuai Lin]], [[Liyuan Wang|AUTHOR Liyuan Wang]], [[Xiaoli Feng|AUTHOR Xiaoli Feng]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1297.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-3|PAPER Thu-2-9-3 — Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Transfer Learning of the Expressivity Using FLOW Metric Learning in Multispeaker Text-to-Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Ajinkya Kulkarni|AUTHOR Ajinkya Kulkarni]], [[Vincent Colotte|AUTHOR Vincent Colotte]], [[Denis Jouvet|AUTHOR Denis Jouvet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-4|PAPER Thu-2-9-4 — Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning]]</div>|<div class="cpsessionviewpapertitle">Speaking Speed Control of End-to-End Speech Synthesis Using Sentence-Level Conditioning</div><div class="cpsessionviewpaperauthor">[[Jae-Sung Bae|AUTHOR Jae-Sung Bae]], [[Hanbin Bae|AUTHOR Hanbin Bae]], [[Young-Sun Joo|AUTHOR Young-Sun Joo]], [[Junmo Lee|AUTHOR Junmo Lee]], [[Gyeong-Hoon Lee|AUTHOR Gyeong-Hoon Lee]], [[Hoon-Young Cho|AUTHOR Hoon-Young Cho]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1411.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-5|PAPER Thu-2-9-5 — Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection]]</div>|<div class="cpsessionviewpapertitle">Dynamic Prosody Generation for Speech Synthesis Using Linguistics-Driven Acoustic Embedding Selection</div><div class="cpsessionviewpaperauthor">[[Shubhi Tyagi|AUTHOR Shubhi Tyagi]], [[Marco Nicolis|AUTHOR Marco Nicolis]], [[Jonas Rohnke|AUTHOR Jonas Rohnke]], [[Thomas Drugman|AUTHOR Thomas Drugman]], [[Jaime Lorenzo-Trueba|AUTHOR Jaime Lorenzo-Trueba]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-6|PAPER Thu-2-9-6 — Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model]]</div>|<div class="cpsessionviewpapertitle">Improving the Prosody of RNN-Based English Text-To-Speech Synthesis by Incorporating a BERT Model</div><div class="cpsessionviewpaperauthor">[[Tom Kenter|AUTHOR Tom Kenter]], [[Manish Sharma|AUTHOR Manish Sharma]], [[Rob Clark|AUTHOR Rob Clark]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1615.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-7|PAPER Thu-2-9-7 — Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction]]</div>|<div class="cpsessionviewpapertitle">Improved Prosody from Learned F0 Codebook Representations for VQ-VAE Speech Waveform Reconstruction</div><div class="cpsessionviewpaperauthor">[[Yi Zhao|AUTHOR Yi Zhao]], [[Haoyu Li|AUTHOR Haoyu Li]], [[Cheng-I Lai|AUTHOR Cheng-I Lai]], [[Jennifer Williams|AUTHOR Jennifer Williams]], [[Erica Cooper|AUTHOR Erica Cooper]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2053.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-8|PAPER Thu-2-9-8 — Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit]]</div>|<div class="cpsessionviewpapertitle">Prosody Learning Mechanism for Speech Synthesis System Without Text Length Limit</div><div class="cpsessionviewpaperauthor">[[Zhen Zeng|AUTHOR Zhen Zeng]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2566.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-9|PAPER Thu-2-9-9 — Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation]]</div>|<div class="cpsessionviewpapertitle">Discriminative Method to Extract Coarse Prosodic Structure and its Application for Statistical Phrase/Accent Command Estimation</div><div class="cpsessionviewpaperauthor">[[Yuma Shirahata|AUTHOR Yuma Shirahata]], [[Daisuke Saito|AUTHOR Daisuke Saito]], [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2861.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-10|PAPER Thu-2-9-10 — Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features]]</div>|<div class="cpsessionviewpapertitle">Controllable Neural Text-to-Speech Synthesis Using Intuitive Prosodic Features</div><div class="cpsessionviewpaperauthor">[[Tuomo Raitio|AUTHOR Tuomo Raitio]], [[Ramya Rasipuram|AUTHOR Ramya Rasipuram]], [[Dan Castellani|AUTHOR Dan Castellani]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2918.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-11|PAPER Thu-2-9-11 — Controllable Neural Prosody Synthesis]]</div>|<div class="cpsessionviewpapertitle">Controllable Neural Prosody Synthesis</div><div class="cpsessionviewpaperauthor">[[Max Morrison|AUTHOR Max Morrison]], [[Zeyu Jin|AUTHOR Zeyu Jin]], [[Justin Salamon|AUTHOR Justin Salamon]], [[Nicholas J. Bryan|AUTHOR Nicholas J. Bryan]], [[Gautham J. Mysore|AUTHOR Gautham J. Mysore]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2985.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-12|PAPER Thu-2-9-12 — Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency]]</div>|<div class="cpsessionviewpapertitle">Multi-Reference Neural TTS Stylization with Adversarial Cycle Consistency</div><div class="cpsessionviewpaperauthor">[[Matt Whitehill|AUTHOR Matt Whitehill]], [[Shuang Ma|AUTHOR Shuang Ma]], [[Daniel McDuff|AUTHOR Daniel McDuff]], [[Yale Song|AUTHOR Yale Song]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3069.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-2-9-13|PAPER Thu-2-9-13 — Interactive Text-to-Speech System via Joint Style Analysis]]</div>|<div class="cpsessionviewpapertitle">Interactive Text-to-Speech System via Joint Style Analysis</div><div class="cpsessionviewpaperauthor">[[Yang Gao|AUTHOR Yang Gao]], [[Weiyi Zheng|AUTHOR Weiyi Zheng]], [[Zhaojun Yang|AUTHOR Zhaojun Yang]], [[Thilo Köhler|AUTHOR Thilo Köhler]], [[Christian Fuegen|AUTHOR Christian Fuegen]], [[Qing He|AUTHOR Qing He]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Kun Qian|
|^&nbsp;|^Helen Meng|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2531.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-1|PAPER Thu-3-1-1 — Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition]]</div>|<div class="cpsessionviewpapertitle">Squeeze for Sneeze: Compact Neural Networks for Cold and Flu Recognition</div><div class="cpsessionviewpaperauthor">[[Merlin Albes|AUTHOR Merlin Albes]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2758.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-2|PAPER Thu-3-1-2 — Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression]]</div>|<div class="cpsessionviewpapertitle">Extended Study on the Use of Vocal Tract Variables to Quantify Neuromotor Coordination in Depression</div><div class="cpsessionviewpaperauthor">[[Nadee Seneviratne|AUTHOR Nadee Seneviratne]], [[James R. Williamson|AUTHOR James R. Williamson]], [[Adam C. Lammert|AUTHOR Adam C. Lammert]], [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]], [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2819.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-3|PAPER Thu-3-1-3 — Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews]]</div>|<div class="cpsessionviewpapertitle">Affective Conditioning on Hierarchical Attention Networks Applied to Depression Detection from Transcribed Clinical Interviews</div><div class="cpsessionviewpaperauthor">[[Danai Xezonaki|AUTHOR Danai Xezonaki]], [[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]], [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3135.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-4|PAPER Thu-3-1-4 — Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs]]</div>|<div class="cpsessionviewpapertitle">Domain Adaptation for Enhancing Speech-Based Depression Detection in Natural Environmental Conditions Using Dilated CNNs</div><div class="cpsessionviewpaperauthor">[[Zhaocheng Huang|AUTHOR Zhaocheng Huang]], [[Julien Epps|AUTHOR Julien Epps]], [[Dale Joachim|AUTHOR Dale Joachim]], [[Brian Stasak|AUTHOR Brian Stasak]], [[James R. Williamson|AUTHOR James R. Williamson]], [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-5|PAPER Thu-3-1-5 — Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech]]</div>|<div class="cpsessionviewpapertitle">Making a Distinction Between Schizophrenia and Bipolar Disorder Based on Temporal Parameters in Spontaneous Speech</div><div class="cpsessionviewpaperauthor">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]], [[Anita Bagi|AUTHOR Anita Bagi]], [[Szilvia Szalóki|AUTHOR Szilvia Szalóki]], [[István Szendi|AUTHOR István Szendi]], [[Ildikó Hoffmann|AUTHOR Ildikó Hoffmann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1601.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-6|PAPER Thu-3-1-6 — Prediction of Sleepiness Ratings from Voice by Man and Machine]]</div>|<div class="cpsessionviewpapertitle">Prediction of Sleepiness Ratings from Voice by Man and Machine</div><div class="cpsessionviewpaperauthor">[[Mark Huckvale|AUTHOR Mark Huckvale]], [[András Beke|AUTHOR András Beke]], [[Mirei Ikushima|AUTHOR Mirei Ikushima]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-7|PAPER Thu-3-1-7 — Tongue and Lip Motion Patterns in Alaryngeal Speech]]</div>|<div class="cpsessionviewpapertitle">Tongue and Lip Motion Patterns in Alaryngeal Speech</div><div class="cpsessionviewpaperauthor">[[Kristin J. Teplansky|AUTHOR Kristin J. Teplansky]], [[Alan Wisler|AUTHOR Alan Wisler]], [[Beiming Cao|AUTHOR Beiming Cao]], [[Wendy Liang|AUTHOR Wendy Liang]], [[Chad W. Whited|AUTHOR Chad W. Whited]], [[Ted Mau|AUTHOR Ted Mau]], [[Jun Wang|AUTHOR Jun Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2746.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-8|PAPER Thu-3-1-8 — Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Autoencoder Bottleneck Features with Multi-Task Optimisation for Improved Continuous Dysarthric Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhengjun Yue|AUTHOR Zhengjun Yue]], [[Heidi Christensen|AUTHOR Heidi Christensen]], [[Jon Barker|AUTHOR Jon Barker]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2221.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-9|PAPER Thu-3-1-9 — Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM]]</div>|<div class="cpsessionviewpapertitle">Raw Speech Waveform Based Classification of Patients with ALS, Parkinson’s Disease and Healthy Controls Using CNN-BLSTM</div><div class="cpsessionviewpaperauthor">[[Jhansi Mallela|AUTHOR Jhansi Mallela]], [[Aravind Illa|AUTHOR Aravind Illa]], [[Yamini Belur|AUTHOR Yamini Belur]], [[Nalini Atchayaram|AUTHOR Nalini Atchayaram]], [[Ravi Yadav|AUTHOR Ravi Yadav]], [[Pradeep Reddy|AUTHOR Pradeep Reddy]], [[Dipanjan Gope|AUTHOR Dipanjan Gope]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2726.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-1-10|PAPER Thu-3-1-10 — Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis]]</div>|<div class="cpsessionviewpapertitle">Assessment of Parkinson’s Disease Medication State Through Automatic Speech Analysis</div><div class="cpsessionviewpaperauthor">[[Anna Pompili|AUTHOR Anna Pompili]], [[Rubén Solera-Ureña|AUTHOR Rubén Solera-Ureña]], [[Alberto Abad|AUTHOR Alberto Abad]], [[Rita Cardoso|AUTHOR Rita Cardoso]], [[Isabel Guimarães|AUTHOR Isabel Guimarães]], [[Margherita Fabbri|AUTHOR Margherita Fabbri]], [[Isabel P. Martins|AUTHOR Isabel P. Martins]], [[Joaquim Ferreira|AUTHOR Joaquim Ferreira]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Rohit Prabhavalkar|
|^&nbsp;|^Yifan Gong|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1363.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-1|PAPER Thu-3-10-1 — Weak-Attention Suppression for Transformer Based Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Weak-Attention Suppression for Transformer Based Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Yangyang Shi|AUTHOR Yangyang Shi]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Chunyang Wu|AUTHOR Chunyang Wu]], [[Christian Fuegen|AUTHOR Christian Fuegen]], [[Frank Zhang|AUTHOR Frank Zhang]], [[Duc Le|AUTHOR Duc Le]], [[Ching-Feng Yeh|AUTHOR Ching-Feng Yeh]], [[Michael L. Seltzer|AUTHOR Michael L. Seltzer]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2361.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-2|PAPER Thu-3-10-2 — Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Conv-Transformer Transducer: Low Latency, Low Frame Rate, Streamable End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Wenyong Huang|AUTHOR Wenyong Huang]], [[Wenchao Hu|AUTHOR Wenchao Hu]], [[Yu Ting Yeung|AUTHOR Yu Ting Yeung]], [[Xiao Chen|AUTHOR Xiao Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2007.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-3|PAPER Thu-3-10-3 — Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning]]</div>|<div class="cpsessionviewpapertitle">Improving Transformer-Based Speech Recognition with Unsupervised Pre-Training and Multi-Task Semantic Knowledge Learning</div><div class="cpsessionviewpaperauthor">[[Song Li|AUTHOR Song Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]], [[Lingling Liu|AUTHOR Lingling Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2928.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-4|PAPER Thu-3-10-4 — Transformer-Based Long-Context End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Transformer-Based Long-Context End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Takaaki Hori|AUTHOR Takaaki Hori]], [[Niko Moritz|AUTHOR Niko Moritz]], [[Chiori Hori|AUTHOR Chiori Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2556.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-5|PAPER Thu-3-10-5 — Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR]]</div>|<div class="cpsessionviewpapertitle">Self-and-Mixed Attention Decoder with Deep Acoustic Structure for Transformer-Based LVCSR</div><div class="cpsessionviewpaperauthor">[[Xinyuan Zhou|AUTHOR Xinyuan Zhou]], [[Grandee Lee|AUTHOR Grandee Lee]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Yanhua Long|AUTHOR Yanhua Long]], [[Jiaen Liang|AUTHOR Jiaen Liang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1716.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-6|PAPER Thu-3-10-6 — Universal Speech Transformer]]</div>|<div class="cpsessionviewpapertitle">Universal Speech Transformer</div><div class="cpsessionviewpaperauthor">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]], [[Shafiq Joty|AUTHOR Shafiq Joty]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2086.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-7|PAPER Thu-3-10-7 — Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ye Bai|AUTHOR Ye Bai]], [[Shuai Zhang|AUTHOR Shuai Zhang]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1198.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-8|PAPER Thu-3-10-8 — Cross Attention with Monotonic Alignment for Speech Transformer]]</div>|<div class="cpsessionviewpapertitle">Cross Attention with Monotonic Alignment for Speech Transformer</div><div class="cpsessionviewpaperauthor">[[Yingzhu Zhao|AUTHOR Yingzhu Zhao]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Cheung-Chi Leung|AUTHOR Cheung-Chi Leung]], [[Shafiq Joty|AUTHOR Shafiq Joty]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-9|PAPER Thu-3-10-9 — Conformer: Convolution-augmented Transformer for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Conformer: Convolution-augmented Transformer for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Anmol Gulati|AUTHOR Anmol Gulati]], [[James Qin|AUTHOR James Qin]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Niki Parmar|AUTHOR Niki Parmar]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Jiahui Yu|AUTHOR Jiahui Yu]], [[Wei Han|AUTHOR Wei Han]], [[Shibo Wang|AUTHOR Shibo Wang]], [[Zhengdong Zhang|AUTHOR Zhengdong Zhang]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ruoming Pang|AUTHOR Ruoming Pang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2638.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-10-10|PAPER Thu-3-10-10 — Exploring Transformers for Large-Scale Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploring Transformers for Large-Scale Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Liang Lu|AUTHOR Liang Lu]], [[Changliang Liu|AUTHOR Changliang Liu]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Masahito Togami|
|^&nbsp;|^Julie Wall|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1168.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-1|PAPER Thu-3-11-1 — Sparseness-Aware DOA Estimation with Majorization Minimization]]</div>|<div class="cpsessionviewpapertitle">Sparseness-Aware DOA Estimation with Majorization Minimization</div><div class="cpsessionviewpaperauthor">[[Masahito Togami|AUTHOR Masahito Togami]], [[Robin Scheibler|AUTHOR Robin Scheibler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1220.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-2|PAPER Thu-3-11-2 — Spatial Resolution of Early Reflection for Speech and White Noise]]</div>|<div class="cpsessionviewpapertitle">Spatial Resolution of Early Reflection for Speech and White Noise</div><div class="cpsessionviewpaperauthor">[[Xiaoli Zhong|AUTHOR Xiaoli Zhong]], [[Hao Song|AUTHOR Hao Song]], [[Xuejie Liu|AUTHOR Xuejie Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1578.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-3|PAPER Thu-3-11-3 — Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality]]</div>|<div class="cpsessionviewpapertitle">Effect of Microphone Position Measurement Error on RIR and its Impact on Speech Intelligibility and Quality</div><div class="cpsessionviewpaperauthor">[[Aditya Raikar|AUTHOR Aditya Raikar]], [[Karan Nathwani|AUTHOR Karan Nathwani]], [[Ashish Panda|AUTHOR Ashish Panda]], [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2156.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-4|PAPER Thu-3-11-4 — Online Blind Reverberation Time Estimation Using CRNNs]]</div>|<div class="cpsessionviewpapertitle">Online Blind Reverberation Time Estimation Using CRNNs</div><div class="cpsessionviewpaperauthor">[[Shuwen Deng|AUTHOR Shuwen Deng]], [[Wolfgang Mack|AUTHOR Wolfgang Mack]], [[Emanuël A.P. Habets|AUTHOR Emanuël A.P. Habets]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2171.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-5|PAPER Thu-3-11-5 — Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking]]</div>|<div class="cpsessionviewpapertitle">Single-Channel Blind Direct-to-Reverberation Ratio Estimation Using Masking</div><div class="cpsessionviewpaperauthor">[[Wolfgang Mack|AUTHOR Wolfgang Mack]], [[Shuwen Deng|AUTHOR Shuwen Deng]], [[Emanuël A.P. Habets|AUTHOR Emanuël A.P. Habets]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2256.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-6|PAPER Thu-3-11-6 — The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments]]</div>|<div class="cpsessionviewpapertitle">The Importance of Time-Frequency Averaging for Binaural Speaker Localization in Reverberant Environments</div><div class="cpsessionviewpaperauthor">[[Hanan Beit-On|AUTHOR Hanan Beit-On]], [[Vladimir Tourbabin|AUTHOR Vladimir Tourbabin]], [[Boaz Rafaely|AUTHOR Boaz Rafaely]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2316.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-7|PAPER Thu-3-11-7 — Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach]]</div>|<div class="cpsessionviewpapertitle">Acoustic Signal Enhancement Using Relative Harmonic Coefficients: Spherical Harmonics Domain Approach</div><div class="cpsessionviewpaperauthor">[[Yonggang Hu|AUTHOR Yonggang Hu]], [[Prasanga N. Samarasinghe|AUTHOR Prasanga N. Samarasinghe]], [[Thushara D. Abhayapala|AUTHOR Thushara D. Abhayapala]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2462.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-8|PAPER Thu-3-11-8 — Instantaneous Time Delay Estimation of Broadband Signals]]</div>|<div class="cpsessionviewpapertitle">Instantaneous Time Delay Estimation of Broadband Signals</div><div class="cpsessionviewpaperauthor">[[B.H.V.S. Narayana Murthy|AUTHOR B.H.V.S. Narayana Murthy]], [[J.V. Satyanarayana|AUTHOR J.V. Satyanarayana]], [[Nivedita Chennupati|AUTHOR Nivedita Chennupati]], [[B. Yegnanarayana|AUTHOR B. Yegnanarayana]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2493.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-9|PAPER Thu-3-11-9 — U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation]]</div>|<div class="cpsessionviewpapertitle">U-Net Based Direct-Path Dominance Test for Robust Direction-of-Arrival Estimation</div><div class="cpsessionviewpaperauthor">[[Hao Wang|AUTHOR Hao Wang]], [[Kai Chen|AUTHOR Kai Chen]], [[Jing Lu|AUTHOR Jing Lu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-11-10|PAPER Thu-3-11-10 — Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning]]</div>|<div class="cpsessionviewpapertitle">Sound Event Localization and Detection Based on Multiple DOA Beamforming and Multi-Task Learning</div><div class="cpsessionviewpaperauthor">[[Wei Xue|AUTHOR Wei Xue]], [[Ying Tong|AUTHOR Ying Tong]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Brian Mak|
|^&nbsp;|^Daisuke Morikawa|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1044.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-1|PAPER Thu-3-2-1 — Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Improving Replay Detection System with Channel Consistency DenseNeXt for the ASVspoof 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Chao Zhang|AUTHOR Chao Zhang]], [[Junjie Cheng|AUTHOR Junjie Cheng]], [[Yanmei Gu|AUTHOR Yanmei Gu]], [[Huacan Wang|AUTHOR Huacan Wang]], [[Jun Ma|AUTHOR Jun Ma]], [[Shaojun Wang|AUTHOR Shaojun Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1077.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-2|PAPER Thu-3-2-2 — Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System]]</div>|<div class="cpsessionviewpapertitle">Subjective Quality Evaluation of Speech Signals Transmitted via BPL-PLC Wired System</div><div class="cpsessionviewpaperauthor">[[Przemyslaw Falkowski-Gilski|AUTHOR Przemyslaw Falkowski-Gilski]], [[Grzegorz Debita|AUTHOR Grzegorz Debita]], [[Marcin Habrych|AUTHOR Marcin Habrych]], [[Bogdan Miedzinski|AUTHOR Bogdan Miedzinski]], [[Przemyslaw Jedlikowski|AUTHOR Przemyslaw Jedlikowski]], [[Bartosz Polnik|AUTHOR Bartosz Polnik]], [[Jan Wandzio|AUTHOR Jan Wandzio]], [[Xin Wang|AUTHOR Xin Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-3|PAPER Thu-3-2-3 — Investigating the Visual Lombard Effect with Gabor Based Features]]</div>|<div class="cpsessionviewpapertitle">Investigating the Visual Lombard Effect with Gabor Based Features</div><div class="cpsessionviewpaperauthor">[[Waito Chiu|AUTHOR Waito Chiu]], [[Yan Xu|AUTHOR Yan Xu]], [[Andrew Abel|AUTHOR Andrew Abel]], [[Chun Lin|AUTHOR Chun Lin]], [[Zhengzheng Tu|AUTHOR Zhengzheng Tu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1885.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-4|PAPER Thu-3-2-4 — Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models]]</div>|<div class="cpsessionviewpapertitle">Exploration of Audio Quality Assessment and Anomaly Localisation Using Attention Models</div><div class="cpsessionviewpaperauthor">[[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1899.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-5|PAPER Thu-3-2-5 — Development of a Speech Quality Database Under Uncontrolled Conditions]]</div>|<div class="cpsessionviewpapertitle">Development of a Speech Quality Database Under Uncontrolled Conditions</div><div class="cpsessionviewpaperauthor">[[Alessandro Ragano|AUTHOR Alessandro Ragano]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]], [[Andrew Hines|AUTHOR Andrew Hines]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2362.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-6|PAPER Thu-3-2-6 — Evaluating the Reliability of Acoustic Speech Embeddings]]</div>|<div class="cpsessionviewpapertitle">Evaluating the Reliability of Acoustic Speech Embeddings</div><div class="cpsessionviewpaperauthor">[[Robin Algayres|AUTHOR Robin Algayres]], [[Mohamed Salah Zaiem|AUTHOR Mohamed Salah Zaiem]], [[Beno^ıt Sagot|AUTHOR Beno^ıt Sagot]], [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2475.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-7|PAPER Thu-3-2-7 — Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning]]</div>|<div class="cpsessionviewpapertitle">Frame-Level Signal-to-Noise Ratio Estimation Using Deep Learning</div><div class="cpsessionviewpaperauthor">[[Hao Li|AUTHOR Hao Li]], [[DeLiang Wang|AUTHOR DeLiang Wang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]], [[Guanglai Gao|AUTHOR Guanglai Gao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2809.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-8|PAPER Thu-3-2-8 — A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals]]</div>|<div class="cpsessionviewpapertitle">A Pyramid Recurrent Network for Predicting Crowdsourced Speech-Quality Ratings of Real-World Signals</div><div class="cpsessionviewpaperauthor">[[Xuan Dong|AUTHOR Xuan Dong]], [[Donald S. Williamson|AUTHOR Donald S. Williamson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-9|PAPER Thu-3-2-9 — Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants]]</div>|<div class="cpsessionviewpapertitle">Effect of Spectral Complexity Reduction and Number of Instruments on Musical Enjoyment with Cochlear Implants</div><div class="cpsessionviewpaperauthor">[[Avamarie Brueggeman|AUTHOR Avamarie Brueggeman]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3088.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-2-10|PAPER Thu-3-2-10 — Spectrum Correction: Acoustic Scene Classification with Mismatched Recording Devices]]</div>|<div class="cpsessionviewpapertitle">Spectrum Correction: Acoustic Scene Classification with Mismatched Recording Devices</div><div class="cpsessionviewpaperauthor">[[Michał Kośmider|AUTHOR Michał Kośmider]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Tom Bäckström|
|^&nbsp;|^Andreas Nautsch|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1977.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-1|PAPER Thu-3-3-1 — Distributed Summation Privacy for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Distributed Summation Privacy for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Matt O’Connor|AUTHOR Matt O’Connor]], [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2299.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-2|PAPER Thu-3-3-2 — Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises]]</div>|<div class="cpsessionviewpapertitle">Perception of Privacy Measured in the Crowd — Paired Comparison on the Effect of Background Noises</div><div class="cpsessionviewpaperauthor">[[Anna Leschanowsky|AUTHOR Anna Leschanowsky]], [[Sneha Das|AUTHOR Sneha Das]], [[Tom Bäckström|AUTHOR Tom Bäckström]], [[Pablo Pérez Zarazaga|AUTHOR Pablo Pérez Zarazaga]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2380.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-3|PAPER Thu-3-3-3 — Hide and Speak: Towards Deep Neural Networks for Speech Steganography]]</div>|<div class="cpsessionviewpapertitle">Hide and Speak: Towards Deep Neural Networks for Speech Steganography</div><div class="cpsessionviewpaperauthor">[[Felix Kreuk|AUTHOR Felix Kreuk]], [[Yossi Adi|AUTHOR Yossi Adi]], [[Bhiksha Raj|AUTHOR Bhiksha Raj]], [[Rita Singh|AUTHOR Rita Singh]], [[Joseph Keshet|AUTHOR Joseph Keshet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2734.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-4|PAPER Thu-3-3-4 — Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification]]</div>|<div class="cpsessionviewpapertitle">Detecting Adversarial Examples for Speech Recognition via Uncertainty Quantification</div><div class="cpsessionviewpaperauthor">[[Sina Däubener|AUTHOR Sina Däubener]], [[Lea Schönherr|AUTHOR Lea Schönherr]], [[Asja Fischer|AUTHOR Asja Fischer]], [[Dorothea Kolossa|AUTHOR Dorothea Kolossa]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2208.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-5|PAPER Thu-3-3-5 — Privacy Guarantees for De-Identifying Text Transformations]]</div>|<div class="cpsessionviewpapertitle">Privacy Guarantees for De-Identifying Text Transformations</div><div class="cpsessionviewpaperauthor">[[David Ifeoluwa Adelani|AUTHOR David Ifeoluwa Adelani]], [[Ali Davody|AUTHOR Ali Davody]], [[Thomas Kleinbauer|AUTHOR Thomas Kleinbauer]], [[Dietrich Klakow|AUTHOR Dietrich Klakow]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1846.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-3-6|PAPER Thu-3-3-6 — Detecting Audio Attacks on ASR Systems with Dropout Uncertainty]]</div>|<div class="cpsessionviewpapertitle">Detecting Audio Attacks on ASR Systems with Dropout Uncertainty</div><div class="cpsessionviewpaperauthor">[[Tejas Jayashankar|AUTHOR Tejas Jayashankar]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]], [[Pierre Moulin|AUTHOR Pierre Moulin]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Keiichi Tokuda|
|^&nbsp;|^Zhiyong Wu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1066.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-1|PAPER Thu-3-4-1 — Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining]]</div>|<div class="cpsessionviewpapertitle">Voice Transformer Network: Sequence-to-Sequence Voice Conversion Using Transformer with Text-to-Speech Pretraining</div><div class="cpsessionviewpaperauthor">[[Wen-Chin Huang|AUTHOR Wen-Chin Huang]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1232.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-2|PAPER Thu-3-4-2 — Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique]]</div>|<div class="cpsessionviewpapertitle">Nonparallel Training of Exemplar-Based Voice Conversion System Using INCA-Based Alignment Technique</div><div class="cpsessionviewpaperauthor">[[Hitoshi Suda|AUTHOR Hitoshi Suda]], [[Gaku Kotani|AUTHOR Gaku Kotani]], [[Daisuke Saito|AUTHOR Daisuke Saito]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1367.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-3|PAPER Thu-3-4-3 — Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System]]</div>|<div class="cpsessionviewpapertitle">Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-Based Voice Conversion System</div><div class="cpsessionviewpaperauthor">[[Chen-Yu Chen|AUTHOR Chen-Yu Chen]], [[Wei-Zhong Zheng|AUTHOR Wei-Zhong Zheng]], [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Pei-Chun Li|AUTHOR Pei-Chun Li]], [[Ying-Hui Lai|AUTHOR Ying-Hui Lai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-4|PAPER Thu-3-4-4 — VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture]]</div>|<div class="cpsessionviewpapertitle">VQVC+: One-Shot Voice Conversion by Vector Quantization and U-Net Architecture</div><div class="cpsessionviewpaperauthor">[[Da-Yi Wu|AUTHOR Da-Yi Wu]], [[Yen-Hao Chen|AUTHOR Yen-Hao Chen]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-5|PAPER Thu-3-4-5 — Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data]]</div>|<div class="cpsessionviewpapertitle">Cotatron: Transcription-Guided Speech Encoder for Any-to-Many Voice Conversion Without Parallel Data</div><div class="cpsessionviewpaperauthor">[[Seung-won Park|AUTHOR Seung-won Park]], [[Doo-young Kim|AUTHOR Doo-young Kim]], [[Myun-chul Joe|AUTHOR Myun-chul Joe]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-6|PAPER Thu-3-4-6 — Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Tao Wang|AUTHOR Tao Wang]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1715.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-7|PAPER Thu-3-4-7 — ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data]]</div>|<div class="cpsessionviewpapertitle">ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data</div><div class="cpsessionviewpaperauthor">[[Zheng Lian|AUTHOR Zheng Lian]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Xinyong Zhou|AUTHOR Xinyong Zhou]], [[Songbai Pu|AUTHOR Songbai Pu]], [[Shengkai Zhang|AUTHOR Shengkai Zhang]], [[Jianhua Tao|AUTHOR Jianhua Tao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1889.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-8|PAPER Thu-3-4-8 — Improved Zero-Shot Voice Conversion Using Explicit Conditioning Signals]]</div>|<div class="cpsessionviewpapertitle">Improved Zero-Shot Voice Conversion Using Explicit Conditioning Signals</div><div class="cpsessionviewpaperauthor">[[Shahan Nercessian|AUTHOR Shahan Nercessian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2162.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-9|PAPER Thu-3-4-9 — Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Non-Parallel Voice Conversion with Fewer Labeled Data by Conditional Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Minchuan Chen|AUTHOR Minchuan Chen]], [[Weijian Hou|AUTHOR Weijian Hou]], [[Jun Ma|AUTHOR Jun Ma]], [[Shaojun Wang|AUTHOR Shaojun Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-10|PAPER Thu-3-4-10 — Transferring Source Style in Non-Parallel Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Transferring Source Style in Non-Parallel Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Songxiang Liu|AUTHOR Songxiang Liu]], [[Yuewen Cao|AUTHOR Yuewen Cao]], [[Shiyin Kang|AUTHOR Shiyin Kang]], [[Na Hu|AUTHOR Na Hu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-4-11|PAPER Thu-3-4-11 — Voice Conversion Using Speech-to-Speech Neuro-Style Transfer]]</div>|<div class="cpsessionviewpapertitle">Voice Conversion Using Speech-to-Speech Neuro-Style Transfer</div><div class="cpsessionviewpaperauthor">[[Ehab A. AlBadawy|AUTHOR Ehab A. AlBadawy]], [[Siwei Lyu|AUTHOR Siwei Lyu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Preethi Jyothi|
|^&nbsp;|^Rathinavelu Chengalvarayan|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-1|PAPER Thu-3-5-1 — Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation]]</div>|<div class="cpsessionviewpapertitle">Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation</div><div class="cpsessionviewpaperauthor">[[Changhan Wang|AUTHOR Changhan Wang]], [[Juan Pino|AUTHOR Juan Pino]], [[Jiatao Gu|AUTHOR Jiatao Gu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-2|PAPER Thu-3-5-2 — Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings]]</div>|<div class="cpsessionviewpapertitle">Transliteration Based Data Augmentation for Training Multilingual ASR Acoustic Models in Low Resource Settings</div><div class="cpsessionviewpaperauthor">[[Samuel Thomas|AUTHOR Samuel Thomas]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2847.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-3|PAPER Thu-3-5-3 — Multilingual Speech Recognition with Self-Attention Structured Parameterization]]</div>|<div class="cpsessionviewpapertitle">Multilingual Speech Recognition with Self-Attention Structured Parameterization</div><div class="cpsessionviewpaperauthor">[[Yun Zhu|AUTHOR Yun Zhu]], [[Parisa Haghani|AUTHOR Parisa Haghani]], [[Anshuman Tripathi|AUTHOR Anshuman Tripathi]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Brian Farris|AUTHOR Brian Farris]], [[Hainan Xu|AUTHOR Hainan Xu]], [[Han Lu|AUTHOR Han Lu]], [[Hasim Sak|AUTHOR Hasim Sak]], [[Isabel Leal|AUTHOR Isabel Leal]], [[Neeraj Gaur|AUTHOR Neeraj Gaur]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]], [[Qian Zhang|AUTHOR Qian Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2919.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-4|PAPER Thu-3-5-4 — Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems]]</div>|<div class="cpsessionviewpapertitle">Lattice-Free Maximum Mutual Information Training of Multilingual Speech Recognition Systems</div><div class="cpsessionviewpaperauthor">[[Srikanth Madikeri|AUTHOR Srikanth Madikeri]], [[Banriskhem K. Khonglah|AUTHOR Banriskhem K. Khonglah]], [[Sibo Tong|AUTHOR Sibo Tong]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]], [[Daniel Povey|AUTHOR Daniel Povey]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2831.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-5|PAPER Thu-3-5-5 — Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters]]</div>|<div class="cpsessionviewpapertitle">Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters</div><div class="cpsessionviewpaperauthor">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Anuroop Sriram|AUTHOR Anuroop Sriram]], [[Paden Tomasello|AUTHOR Paden Tomasello]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2739.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-6|PAPER Thu-3-5-6 — Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages]]</div>|<div class="cpsessionviewpapertitle">Multilingual Speech Recognition Using Language-Specific Phoneme Recognition as Auxiliary Task for Indian Languages</div><div class="cpsessionviewpaperauthor">[[Hardik B. Sailor|AUTHOR Hardik B. Sailor]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2574.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-7|PAPER Thu-3-5-7 — Style Variation as a Vantage Point for Code-Switching]]</div>|<div class="cpsessionviewpapertitle">Style Variation as a Vantage Point for Code-Switching</div><div class="cpsessionviewpaperauthor">[[Khyathi Raghavi Chandu|AUTHOR Khyathi Raghavi Chandu]], [[Alan W. Black|AUTHOR Alan W. Black]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2485.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-8|PAPER Thu-3-5-8 — Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts]]</div>|<div class="cpsessionviewpapertitle">Bi-Encoder Transformer Network for Mandarin-English Code-Switching Speech Recognition Using Mixture of Experts</div><div class="cpsessionviewpaperauthor">[[Yizhou Lu|AUTHOR Yizhou Lu]], [[Mingkun Huang|AUTHOR Mingkun Huang]], [[Hao Li|AUTHOR Hao Li]], [[Jiaqi Guo|AUTHOR Jiaqi Guo]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-9|PAPER Thu-3-5-9 — Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS]]</div>|<div class="cpsessionviewpapertitle">Improving Low Resource Code-Switched ASR Using Augmented Code-Switched TTS</div><div class="cpsessionviewpaperauthor">[[Yash Sharma|AUTHOR Yash Sharma]], [[Basil Abraham|AUTHOR Basil Abraham]], [[Karan Taneja|AUTHOR Karan Taneja]], [[Preethi Jyothi|AUTHOR Preethi Jyothi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1980.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-5-10|PAPER Thu-3-5-10 — Towards Context-Aware End-to-End Code-Switching Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Towards Context-Aware End-to-End Code-Switching Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zimeng Qiu|AUTHOR Zimeng Qiu]], [[Yiyuan Li|AUTHOR Yiyuan Li]], [[Xinjian Li|AUTHOR Xinjian Li]], [[Florian Metze|AUTHOR Florian Metze]], [[William M. Campbell|AUTHOR William M. Campbell]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Sofia Strömbergsson|
|^&nbsp;|^Stuart Cunningham|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1196.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-1|PAPER Thu-3-6-1 — Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency]]</div>|<div class="cpsessionviewpapertitle">Increasing the Intelligibility and Naturalness of Alaryngeal Speech Using Voice Conversion and Synthetic Fundamental Frequency</div><div class="cpsessionviewpaperauthor">[[Tuan Dinh|AUTHOR Tuan Dinh]], [[Alexander Kain|AUTHOR Alexander Kain]], [[Robin Samlan|AUTHOR Robin Samlan]], [[Beiming Cao|AUTHOR Beiming Cao]], [[Jun Wang|AUTHOR Jun Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1997.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-2|PAPER Thu-3-6-2 — Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning]]</div>|<div class="cpsessionviewpapertitle">Automatic Assessment of Dysarthric Severity Level Using Audio-Video Cross-Modal Approach in Deep Learning</div><div class="cpsessionviewpaperauthor">[[Han Tong|AUTHOR Han Tong]], [[Hamid Sharifzadeh|AUTHOR Hamid Sharifzadeh]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1755.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-3|PAPER Thu-3-6-3 — Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription]]</div>|<div class="cpsessionviewpapertitle">Staged Knowledge Distillation for End-to-End Dysarthric Speech Recognition and Speech Attribute Transcription</div><div class="cpsessionviewpaperauthor">[[Yuqin Lin|AUTHOR Yuqin Lin]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Sheng Li|AUTHOR Sheng Li]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Chenchen Ding|AUTHOR Chenchen Ding]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2267.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-4|PAPER Thu-3-6-4 — Dysarthric Speech Recognition Based on Deep Metric Learning]]</div>|<div class="cpsessionviewpapertitle">Dysarthric Speech Recognition Based on Deep Metric Learning</div><div class="cpsessionviewpaperauthor">[[Yuki Takashima|AUTHOR Yuki Takashima]], [[Ryoichi Takashima|AUTHOR Ryoichi Takashima]], [[Tetsuya Takiguchi|AUTHOR Tetsuya Takiguchi]], [[Yasuo Ariki|AUTHOR Yasuo Ariki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2599.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-5|PAPER Thu-3-6-5 — Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks]]</div>|<div class="cpsessionviewpapertitle">Automatic Glottis Detection and Segmentation in Stroboscopic Videos Using Convolutional Networks</div><div class="cpsessionviewpaperauthor">[[Divya Degala|AUTHOR Divya Degala]], [[Achuth Rao M.V.|AUTHOR Achuth Rao M.V.]], [[Rahul Krishnamurthy|AUTHOR Rahul Krishnamurthy]], [[Pebbili Gopikishore|AUTHOR Pebbili Gopikishore]], [[Veeramani Priyadharshini|AUTHOR Veeramani Priyadharshini]], [[Prakash T.K.|AUTHOR Prakash T.K.]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2684.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-6|PAPER Thu-3-6-6 — Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification]]</div>|<div class="cpsessionviewpapertitle">Acoustic Feature Extraction with Interpretable Deep Neural Network for Neurodegenerative Related Disorder Classification</div><div class="cpsessionviewpaperauthor">[[Yilin Pan|AUTHOR Yilin Pan]], [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]], [[Zehai Tu|AUTHOR Zehai Tu]], [[Ronan O’Malley|AUTHOR Ronan O’Malley]], [[Traci Walker|AUTHOR Traci Walker]], [[Annalena Venneri|AUTHOR Annalena Venneri]], [[Markus Reuber|AUTHOR Markus Reuber]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Heidi Christensen|AUTHOR Heidi Christensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2768.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-7|PAPER Thu-3-6-7 — Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis]]</div>|<div class="cpsessionviewpapertitle">Coswara — A Database of Breathing, Cough, and Voice Sounds for COVID-19 Diagnosis</div><div class="cpsessionviewpaperauthor">[[Neeraj Sharma|AUTHOR Neeraj Sharma]], [[Prashant Krishnan|AUTHOR Prashant Krishnan]], [[Rohit Kumar|AUTHOR Rohit Kumar]], [[Shreyas Ramoji|AUTHOR Shreyas Ramoji]], [[Srikanth Raj Chetupalli|AUTHOR Srikanth Raj Chetupalli]], [[Nirmala R.|AUTHOR Nirmala R.]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1459.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-8|PAPER Thu-3-6-8 — Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control]]</div>|<div class="cpsessionviewpapertitle">Acoustic-Based Articulatory Phenotypes of Amyotrophic Lateral Sclerosis and Parkinson’s Disease: Towards an Interpretable, Hypothesis-Driven Framework of Motor Control</div><div class="cpsessionviewpaperauthor">[[Hannah P. Rowe|AUTHOR Hannah P. Rowe]], [[Sarah E. Gutz|AUTHOR Sarah E. Gutz]], [[Marc F. Maffei|AUTHOR Marc F. Maffei]], [[Jordan R. Green|AUTHOR Jordan R. Green]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1825.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-9|PAPER Thu-3-6-9 — Recognising Emotions in Dysarthric Speech Using Typical Speech Data]]</div>|<div class="cpsessionviewpapertitle">Recognising Emotions in Dysarthric Speech Using Typical Speech Data</div><div class="cpsessionviewpaperauthor">[[Lubna Alhinti|AUTHOR Lubna Alhinti]], [[Stuart Cunningham|AUTHOR Stuart Cunningham]], [[Heidi Christensen|AUTHOR Heidi Christensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1598.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-6-10|PAPER Thu-3-6-10 — Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild]]</div>|<div class="cpsessionviewpapertitle">Detecting and Analysing Spontaneous Oral Cancer Speech in the Wild</div><div class="cpsessionviewpaperauthor">[[Bence Mark Halpern|AUTHOR Bence Mark Halpern]], [[Rob van Son|AUTHOR Rob van Son]], [[Michiel van den Brekel|AUTHOR Michiel van den Brekel]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Emmanuel Dupoux|
|^&nbsp;|^Ewan Dunbar|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2743.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-1|PAPER Thu-3-7-1 — The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units]]</div>|<div class="cpsessionviewpapertitle">The Zero Resource Speech Challenge 2020: Discovering Discrete Subword and Word Units</div><div class="cpsessionviewpaperauthor">[[Ewan Dunbar|AUTHOR Ewan Dunbar]], [[Julien Karadayi|AUTHOR Julien Karadayi]], [[Mathieu Bernard|AUTHOR Mathieu Bernard]], [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]], [[Robin Algayres|AUTHOR Robin Algayres]], [[Lucas Ondel|AUTHOR Lucas Ondel]], [[Laurent Besacier|AUTHOR Laurent Besacier]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-2|PAPER Thu-3-7-2 — Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge]]</div>|<div class="cpsessionviewpapertitle">Vector-Quantized Neural Networks for Acoustic Unit Discovery in the ZeroSpeech 2020 Challenge</div><div class="cpsessionviewpaperauthor">[[Benjamin van Niekerk|AUTHOR Benjamin van Niekerk]], [[Leanne Nortje|AUTHOR Leanne Nortje]], [[Herman Kamper|AUTHOR Herman Kamper]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2731.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-3|PAPER Thu-3-7-3 — Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">Exploration of End-to-End Synthesisers for Zero Resource Speech Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Karthik Pandia D.S.|AUTHOR Karthik Pandia D.S.]], [[Anusha Prakash|AUTHOR Anusha Prakash]], [[Mano Ranjith Kumar M.|AUTHOR Mano Ranjith Kumar M.]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-4|PAPER Thu-3-7-4 — Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery]]</div>|<div class="cpsessionviewpapertitle">Vector Quantized Temporally-Aware Correspondence Sparse Autoencoders for Zero-Resource Acoustic Unit Discovery</div><div class="cpsessionviewpaperauthor">[[Batuhan Gundogdu|AUTHOR Batuhan Gundogdu]], [[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Mansur Yesilbursa|AUTHOR Mansur Yesilbursa]], [[Murat Saraclar|AUTHOR Murat Saraclar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-5|PAPER Thu-3-7-5 — Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge]]</div>|<div class="cpsessionviewpapertitle">Transformer VQ-VAE for Unsupervised Unit Discovery and Speech Synthesis: ZeroSpeech 2020 Challenge</div><div class="cpsessionviewpaperauthor">[[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3127.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-6|PAPER Thu-3-7-6 — Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)]]</div>|<div class="cpsessionviewpapertitle">Exploring TTS Without T Using Biologically/Psychologically Motivated Neural Network Modules (ZeroSpeech 2020)</div><div class="cpsessionviewpaperauthor">[[Takashi Morita|AUTHOR Takashi Morita]], [[Hiroki Koda|AUTHOR Hiroki Koda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-7|PAPER Thu-3-7-7 — Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling]]</div>|<div class="cpsessionviewpapertitle">Cyclic Spectral Modeling for Unsupervised Unit Discovery into Voice Conversion with Excitation and Waveform Modeling</div><div class="cpsessionviewpaperauthor">[[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1785.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-8|PAPER Thu-3-7-8 — Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Acoustic Unit Representation Learning for Voice Conversion Using WaveNet Auto-Encoders</div><div class="cpsessionviewpaperauthor">[[Mingjie Chen|AUTHOR Mingjie Chen]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-9|PAPER Thu-3-7-9 — Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Discovery of Recurring Speech Patterns Using Probabilistic Adaptive Metrics</div><div class="cpsessionviewpaperauthor">[[Okko Räsänen|AUTHOR Okko Räsänen]], [[María Andrea Cruz Blandón|AUTHOR María Andrea Cruz Blandón]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3000.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-10|PAPER Thu-3-7-10 — Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery]]</div>|<div class="cpsessionviewpapertitle">Self-Expressing Autoencoders for Unsupervised Spoken Term Discovery</div><div class="cpsessionviewpaperauthor">[[Saurabhchand Bhati|AUTHOR Saurabhchand Bhati]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-11|PAPER Thu-3-7-11 — Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling]]</div>|<div class="cpsessionviewpapertitle">Perceptimatic: A Human Speech Perception Benchmark for Unsupervised Subword Modelling</div><div class="cpsessionviewpaperauthor">[[Juliette Millet|AUTHOR Juliette Millet]], [[Ewan Dunbar|AUTHOR Ewan Dunbar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2745.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-12|PAPER Thu-3-7-12 — Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset]]</div>|<div class="cpsessionviewpapertitle">Decoding Imagined, Heard, and Spoken Speech: Classification and Regression of EEG Using a 14-Channel Dry-Contact Mobile Headset</div><div class="cpsessionviewpaperauthor">[[Jonathan Clayton|AUTHOR Jonathan Clayton]], [[Scott Wellington|AUTHOR Scott Wellington]], [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]], [[Oliver Watts|AUTHOR Oliver Watts]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-13|PAPER Thu-3-7-13 — Glottal Closure Instants Detection from EGG Signal by Classification Approach]]</div>|<div class="cpsessionviewpapertitle">Glottal Closure Instants Detection from EGG Signal by Classification Approach</div><div class="cpsessionviewpaperauthor">[[Gurunath Reddy M.|AUTHOR Gurunath Reddy M.]], [[K. Sreenivasa Rao|AUTHOR K. Sreenivasa Rao]], [[Partha Pratim Das|AUTHOR Partha Pratim Das]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-7-14|PAPER Thu-3-7-14 — Classify Imaginary Mandarin Tones with Cortical EEG Signals]]</div>|<div class="cpsessionviewpapertitle">Classify Imaginary Mandarin Tones with Cortical EEG Signals</div><div class="cpsessionviewpaperauthor">[[Hua Li|AUTHOR Hua Li]], [[Fei Chen|AUTHOR Fei Chen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Ebru Arisoy|
|^&nbsp;|^Peter Bell|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2001.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-1|PAPER Thu-3-8-1 — Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework]]</div>|<div class="cpsessionviewpapertitle">Augmenting Images for ASR and TTS Through Single-Loop and Dual-Loop Multimodal Chain Framework</div><div class="cpsessionviewpaperauthor">[[Johanes Effendi|AUTHOR Johanes Effendi]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1250.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-2|PAPER Thu-3-8-2 — Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?]]</div>|<div class="cpsessionviewpapertitle">Punctuation Prediction in Spontaneous Conversations: Can We Mitigate ASR Errors with Retrofitted Word Embeddings?</div><div class="cpsessionviewpaperauthor">[[Łukasz Augustyniak|AUTHOR Łukasz Augustyniak]], [[Piotr Szymański|AUTHOR Piotr Szymański]], [[Mikołaj Morzy|AUTHOR Mikołaj Morzy]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Adrian Szymczak|AUTHOR Adrian Szymczak]], [[Jan Mizgajski|AUTHOR Jan Mizgajski]], [[Yishay Carmiel|AUTHOR Yishay Carmiel]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3074.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-3|PAPER Thu-3-8-3 — Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech]]</div>|<div class="cpsessionviewpapertitle">Multimodal Semi-Supervised Learning Framework for Punctuation Prediction in Conversational Speech</div><div class="cpsessionviewpaperauthor">[[Monica Sunkara|AUTHOR Monica Sunkara]], [[Srikanth Ronanki|AUTHOR Srikanth Ronanki]], [[Dhanush Bekal|AUTHOR Dhanush Bekal]], [[Sravan Bodapati|AUTHOR Sravan Bodapati]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2909.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-4|PAPER Thu-3-8-4 — Efficient MDI Adaptation for n-Gram Language Models]]</div>|<div class="cpsessionviewpapertitle">Efficient MDI Adaptation for n-Gram Language Models</div><div class="cpsessionviewpaperauthor">[[Ruizhe Huang|AUTHOR Ruizhe Huang]], [[Ke Li|AUTHOR Ke Li]], [[Ashish Arora|AUTHOR Ashish Arora]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1465.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-5|PAPER Thu-3-8-5 — Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus]]</div>|<div class="cpsessionviewpapertitle">Improving Tail Performance of a Deliberation E2E ASR Model Using a Large Text Corpus</div><div class="cpsessionviewpaperauthor">[[Cal Peyser|AUTHOR Cal Peyser]], [[Sepand Mavandadi|AUTHOR Sepand Mavandadi]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[James Apfel|AUTHOR James Apfel]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Shankar Kumar|AUTHOR Shankar Kumar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1524.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-6|PAPER Thu-3-8-6 — Language Model Data Augmentation Based on Text Domain Transfer]]</div>|<div class="cpsessionviewpapertitle">Language Model Data Augmentation Based on Text Domain Transfer</div><div class="cpsessionviewpaperauthor">[[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Marc Delcroix|AUTHOR Marc Delcroix]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1207.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-7|PAPER Thu-3-8-7 — Contemporary Polish Language Model (Version 2) Using Big Data and Sub-Word Approach]]</div>|<div class="cpsessionviewpapertitle">Contemporary Polish Language Model (Version 2) Using Big Data and Sub-Word Approach</div><div class="cpsessionviewpaperauthor">[[Krzysztof Wołk|AUTHOR Krzysztof Wołk]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2514.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-8|PAPER Thu-3-8-8 — Improving Speech Recognition of Compound-Rich Languages]]</div>|<div class="cpsessionviewpapertitle">Improving Speech Recognition of Compound-Rich Languages</div><div class="cpsessionviewpaperauthor">[[Prabhat Pandey|AUTHOR Prabhat Pandey]], [[Volker Leutnant|AUTHOR Volker Leutnant]], [[Simon Wiesler|AUTHOR Simon Wiesler]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Daniel Willett|AUTHOR Daniel Willett]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1586.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-8-9|PAPER Thu-3-8-9 — Language Modeling for Speech Analytics in Under-Resourced Languages]]</div>|<div class="cpsessionviewpapertitle">Language Modeling for Speech Analytics in Under-Resourced Languages</div><div class="cpsessionviewpaperauthor">[[Simone Wills|AUTHOR Simone Wills]], [[Pieter Uys|AUTHOR Pieter Uys]], [[Charl van Heerden|AUTHOR Charl van Heerden]], [[Etienne Barnard|AUTHOR Etienne Barnard]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Thursday 29 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Nicholas Cummins|
|^&nbsp;|^Dávid Sztahó|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2223.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-1|PAPER Thu-3-9-1 — An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety]]</div>|<div class="cpsessionviewpapertitle">An Early Study on Intelligent Analysis of Speech Under COVID-19: Severity, Sleep Quality, Fatigue, and Anxiety</div><div class="cpsessionviewpaperauthor">[[Jing Han|AUTHOR Jing Han]], [[Kun Qian|AUTHOR Kun Qian]], [[Meishu Song|AUTHOR Meishu Song]], [[Zijiang Yang|AUTHOR Zijiang Yang]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Shuo Liu|AUTHOR Shuo Liu]], [[Juan Liu|AUTHOR Juan Liu]], [[Huaiyuan Zheng|AUTHOR Huaiyuan Zheng]], [[Wei Ji|AUTHOR Wei Ji]], [[Tomoya Koike|AUTHOR Tomoya Koike]], [[Xiao Li|AUTHOR Xiao Li]], [[Zixing Zhang|AUTHOR Zixing Zhang]], [[Yoshiharu Yamamoto|AUTHOR Yoshiharu Yamamoto]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1801.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-2|PAPER Thu-3-9-2 — An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels]]</div>|<div class="cpsessionviewpapertitle">An Evaluation of the Effect of Anxiety on Speech — Computational Prediction of Anxiety from Sustained Vowels</div><div class="cpsessionviewpaperauthor">[[Alice Baird|AUTHOR Alice Baird]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Sebastian Schnieder|AUTHOR Sebastian Schnieder]], [[Jarek Krajewski|AUTHOR Jarek Krajewski]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2396.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-3|PAPER Thu-3-9-3 — Hybrid Network Feature Extraction for Depression Assessment from Speech]]</div>|<div class="cpsessionviewpapertitle">Hybrid Network Feature Extraction for Depression Assessment from Speech</div><div class="cpsessionviewpaperauthor">[[Ziping Zhao|AUTHOR Ziping Zhao]], [[Qifei Li|AUTHOR Qifei Li]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Bin Liu|AUTHOR Bin Liu]], [[Haishuai Wang|AUTHOR Haishuai Wang]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-4|PAPER Thu-3-9-4 — Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction]]</div>|<div class="cpsessionviewpapertitle">Improving Detection of Alzheimer’s Disease Using Automatic Speech Recognition to Identify High-Quality Segments for More Robust Feature Extraction</div><div class="cpsessionviewpaperauthor">[[Yilin Pan|AUTHOR Yilin Pan]], [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]], [[Markus Reuber|AUTHOR Markus Reuber]], [[Annalena Venneri|AUTHOR Annalena Venneri]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Heidi Christensen|AUTHOR Heidi Christensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2724.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-5|PAPER Thu-3-9-5 — Classification of Manifest Huntington Disease Using Vowel Distortion Measures]]</div>|<div class="cpsessionviewpapertitle">Classification of Manifest Huntington Disease Using Vowel Distortion Measures</div><div class="cpsessionviewpaperauthor">[[Amrit Romana|AUTHOR Amrit Romana]], [[John Bandon|AUTHOR John Bandon]], [[Noelle Carlozzi|AUTHOR Noelle Carlozzi]], [[Angela Roberts|AUTHOR Angela Roberts]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3197.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-6|PAPER Thu-3-9-6 — Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients]]</div>|<div class="cpsessionviewpapertitle">Parkinson’s Disease Detection from Speech Using Single Frequency Filtering Cepstral Coefficients</div><div class="cpsessionviewpaperauthor">[[Sudarsana Reddy Kadiri|AUTHOR Sudarsana Reddy Kadiri]], [[Rashmi Kethireddy|AUTHOR Rashmi Kethireddy]], [[Paavo Alku|AUTHOR Paavo Alku]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1431.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-7|PAPER Thu-3-9-7 — Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer]]</div>|<div class="cpsessionviewpapertitle">Automatic Prediction of Speech Intelligibility Based on X-Vectors in the Context of Head and Neck Cancer</div><div class="cpsessionviewpaperauthor">[[Sebastião Quintas|AUTHOR Sebastião Quintas]], [[Julie Mauclair|AUTHOR Julie Mauclair]], [[Virginie Woisard|AUTHOR Virginie Woisard]], [[Julien Pinquier|AUTHOR Julien Pinquier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1805.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-8|PAPER Thu-3-9-8 — Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study]]</div>|<div class="cpsessionviewpapertitle">Spectral Moment and Duration of Burst of Plosives in Speech of Children with Hearing Impairment and Typically Developing Children — A Comparative Study</div><div class="cpsessionviewpaperauthor">[[Ajish K. Abraham|AUTHOR Ajish K. Abraham]], [[M. Pushpavathi|AUTHOR M. Pushpavathi]], [[N. Sreedevi|AUTHOR N. Sreedevi]], [[A. Navya|AUTHOR A. Navya]], [[C.M. Vikram|AUTHOR C.M. Vikram]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2049.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-9|PAPER Thu-3-9-9 — Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts]]</div>|<div class="cpsessionviewpapertitle">Aphasic Speech Recognition Using a Mixture of Speech Intelligibility Experts</div><div class="cpsessionviewpaperauthor">[[Matthew Perez|AUTHOR Matthew Perez]], [[Zakaria Aldeneh|AUTHOR Zakaria Aldeneh]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2253.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-3-9-10|PAPER Thu-3-9-10 — Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features]]</div>|<div class="cpsessionviewpapertitle">Automatic Discrimination of Apraxia of Speech and Dysarthria Using a Minimalistic Set of Handcrafted Features</div><div class="cpsessionviewpaperauthor">[[Ina Kodrasi|AUTHOR Ina Kodrasi]], [[Michaela Pernon|AUTHOR Michaela Pernon]], [[Marina Laganaro|AUTHOR Marina Laganaro]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Thursday 29 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Jim Glass|
|^&nbsp;|^Shinnosuke Takamichi|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1212.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-1|PAPER Thu-SS-1-6-1 — Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Jointly Fine-Tuning “BERT-Like” Self Supervised Models to Improve Multimodal Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Shamane Siriwardhana|AUTHOR Shamane Siriwardhana]], [[Andrew Reis|AUTHOR Andrew Reis]], [[Rivindu Weerasekera|AUTHOR Rivindu Weerasekera]], [[Suranga Nanayakkara|AUTHOR Suranga Nanayakkara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-2|PAPER Thu-SS-1-6-2 — Vector-Quantized Autoregressive Predictive Coding]]</div>|<div class="cpsessionviewpapertitle">Vector-Quantized Autoregressive Predictive Coding</div><div class="cpsessionviewpaperauthor">[[Yu-An Chung|AUTHOR Yu-An Chung]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1511.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-3|PAPER Thu-SS-1-6-3 — Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks]]</div>|<div class="cpsessionviewpapertitle">Speech-XLNet: Unsupervised Acoustic Model Pretraining for Self-Attention Networks</div><div class="cpsessionviewpaperauthor">[[Xingchen Song|AUTHOR Xingchen Song]], [[Guangsen Wang|AUTHOR Guangsen Wang]], [[Yiheng Huang|AUTHOR Yiheng Huang]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Dan Su|AUTHOR Dan Su]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1917.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-4|PAPER Thu-SS-1-6-4 — Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR]]</div>|<div class="cpsessionviewpapertitle">Large Scale Weakly and Semi-Supervised Learning for Low-Resource Video ASR</div><div class="cpsessionviewpaperauthor">[[Kritika Singh|AUTHOR Kritika Singh]], [[Vimal Manohar|AUTHOR Vimal Manohar]], [[Alex Xiao|AUTHOR Alex Xiao]], [[Sergey Edunov|AUTHOR Sergey Edunov]], [[Ross Girshick|AUTHOR Ross Girshick]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Christian Fuegen|AUTHOR Christian Fuegen]], [[Yatharth Saraf|AUTHOR Yatharth Saraf]], [[Geoffrey Zweig|AUTHOR Geoffrey Zweig]], [[Abdelrahman Mohamed|AUTHOR Abdelrahman Mohamed]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2020.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-5|PAPER Thu-SS-1-6-5 — Sequence-Level Self-Learning with Multiple Hypotheses]]</div>|<div class="cpsessionviewpapertitle">Sequence-Level Self-Learning with Multiple Hypotheses</div><div class="cpsessionviewpaperauthor">[[Kenichi Kumatani|AUTHOR Kenichi Kumatani]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Michael Zeng|AUTHOR Michael Zeng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2026.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-6|PAPER Thu-SS-1-6-6 — Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning]]</div>|<div class="cpsessionviewpapertitle">Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning</div><div class="cpsessionviewpaperauthor">[[Haibin Wu|AUTHOR Haibin Wu]], [[Andy T. Liu|AUTHOR Andy T. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2231.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-7|PAPER Thu-SS-1-6-7 — Understanding Self-Attention of Self-Supervised Audio Transformers]]</div>|<div class="cpsessionviewpapertitle">Understanding Self-Attention of Self-Supervised Audio Transformers</div><div class="cpsessionviewpaperauthor">[[Shu-wen Yang|AUTHOR Shu-wen Yang]], [[Andy T. Liu|AUTHOR Andy T. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3084.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-8|PAPER Thu-SS-1-6-8 — A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning]]</div>|<div class="cpsessionviewpapertitle">A Convolutional Deep Markov Model for Unsupervised Speech Representation Learning</div><div class="cpsessionviewpaperauthor">[[Sameer Khurana|AUTHOR Sameer Khurana]], [[Antoine Laurent|AUTHOR Antoine Laurent]], [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[Jan Chorowski|AUTHOR Jan Chorowski]], [[Adrian Lancucki|AUTHOR Adrian Lancucki]], [[Ricard Marxer|AUTHOR Ricard Marxer]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2829.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-1-6-9|PAPER Thu-SS-1-6-9 — Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline]]</div>|<div class="cpsessionviewpapertitle">Automatic Speech Recognition for ILSE-Interviews: Longitudinal Conversational Speech Recordings Covering Aging and Cognitive Decline</div><div class="cpsessionviewpaperauthor">[[Ayimunishagu Abulimiti|AUTHOR Ayimunishagu Abulimiti]], [[Jochen Weiner|AUTHOR Jochen Weiner]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Thursday 29 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Rohan Kumar Das|
|^&nbsp;|^Tomi Kinnunen|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1052.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-1|PAPER Thu-SS-2-5-1 — The Attacker’s Perspective on Automatic Speaker Verification: An Overview]]</div>|<div class="cpsessionviewpapertitle">The Attacker’s Perspective on Automatic Speaker Verification: An Overview</div><div class="cpsessionviewpaperauthor">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Xiaohai Tian|AUTHOR Xiaohai Tian]], [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1090.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-2|PAPER Thu-SS-2-5-2 — Extrapolating False Alarm Rates in Automatic Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Extrapolating False Alarm Rates in Automatic Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Alexey Sholokhov|AUTHOR Alexey Sholokhov]], [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]], [[Ville Vestman|AUTHOR Ville Vestman]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-3|PAPER Thu-SS-2-5-3 — Self-Supervised Spoofing Audio Detection Scheme]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Spoofing Audio Detection Scheme</div><div class="cpsessionviewpaperauthor">[[Ziyue Jiang|AUTHOR Ziyue Jiang]], [[Hongcheng Zhu|AUTHOR Hongcheng Zhu]], [[Li Peng|AUTHOR Li Peng]], [[Wenbing Ding|AUTHOR Wenbing Ding]], [[Yanzhen Ren|AUTHOR Yanzhen Ren]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1955.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-4|PAPER Thu-SS-2-5-4 — Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Inaudible Adversarial Perturbations for Targeted Attack in Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Qing Wang|AUTHOR Qing Wang]], [[Pengcheng Guo|AUTHOR Pengcheng Guo]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2458.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-5|PAPER Thu-SS-2-5-5 — x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">x-Vectors Meet Adversarial Attacks: Benchmarking Adversarial Robustness in Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Jesús Villalba|AUTHOR Jesús Villalba]], [[Yuekai Zhang|AUTHOR Yuekai Zhang]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2834.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-2-5-6|PAPER Thu-SS-2-5-6 — Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples]]</div>|<div class="cpsessionviewpapertitle">Black-Box Attacks on Spoofing Countermeasures Using Transferability of Adversarial Examples</div><div class="cpsessionviewpaperauthor">[[Yuekai Zhang|AUTHOR Yuekai Zhang]], [[Ziyan Jiang|AUTHOR Ziyan Jiang]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Laurent Besacier|
|^&nbsp;|^Sakriani Sakti|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1148.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-1|PAPER Tue-1-1-1 — A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions]]</div>|<div class="cpsessionviewpapertitle">A DNN-HMM-DNN Hybrid Model for Discovering Word-Like Units from Spoken Captions and Image Regions</div><div class="cpsessionviewpaperauthor">[[Liming Wang|AUTHOR Liming Wang]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1241.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-2|PAPER Tue-1-1-2 — Efficient Wait-k Models for Simultaneous Machine Translation]]</div>|<div class="cpsessionviewpapertitle">Efficient Wait-k Models for Simultaneous Machine Translation</div><div class="cpsessionviewpaperauthor">[[Maha Elbayad|AUTHOR Maha Elbayad]], [[Laurent Besacier|AUTHOR Laurent Besacier]], [[Jakob Verbeek|AUTHOR Jakob Verbeek]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1835.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-3|PAPER Tue-1-1-3 — Investigating Self-Supervised Pre-Training for End-to-End Speech Translation]]</div>|<div class="cpsessionviewpapertitle">Investigating Self-Supervised Pre-Training for End-to-End Speech Translation</div><div class="cpsessionviewpaperauthor">[[Ha Nguyen|AUTHOR Ha Nguyen]], [[Fethi Bougares|AUTHOR Fethi Bougares]], [[N. Tomashenko|AUTHOR N. Tomashenko]], [[Yannick Estève|AUTHOR Yannick Estève]], [[Laurent Besacier|AUTHOR Laurent Besacier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2860.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-4|PAPER Tue-1-1-4 — Contextualized Translation of Automatically Segmented Speech]]</div>|<div class="cpsessionviewpapertitle">Contextualized Translation of Automatically Segmented Speech</div><div class="cpsessionviewpaperauthor">[[Marco Gaido|AUTHOR Marco Gaido]], [[Mattia A. Di Gangi|AUTHOR Mattia A. Di Gangi]], [[Matteo Negri|AUTHOR Matteo Negri]], [[Mauro Cettolo|AUTHOR Mauro Cettolo]], [[Marco Turchi|AUTHOR Marco Turchi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-5|PAPER Tue-1-1-5 — Self-Training for End-to-End Speech Translation]]</div>|<div class="cpsessionviewpapertitle">Self-Training for End-to-End Speech Translation</div><div class="cpsessionviewpaperauthor">[[Juan Pino|AUTHOR Juan Pino]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Xutai Ma|AUTHOR Xutai Ma]], [[Mohammad Javad Dousti|AUTHOR Mohammad Javad Dousti]], [[Yun Tang|AUTHOR Yun Tang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2983.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-6|PAPER Tue-1-1-6 — Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing]]</div>|<div class="cpsessionviewpapertitle">Evaluating and Optimizing Prosodic Alignment for Automatic Dubbing</div><div class="cpsessionviewpaperauthor">[[Marcello Federico|AUTHOR Marcello Federico]], [[Yogesh Virkar|AUTHOR Yogesh Virkar]], [[Robert Enyedi|AUTHOR Robert Enyedi]], [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3078.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-7|PAPER Tue-1-1-7 — Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets]]</div>|<div class="cpsessionviewpapertitle">Pair Expansion for Learning Multilingual Semantic Embeddings Using Disjoint Visually-Grounded Speech Audio Datasets</div><div class="cpsessionviewpaperauthor">[[Yasunori Ohishi|AUTHOR Yasunori Ohishi]], [[Akisato Kimura|AUTHOR Akisato Kimura]], [[Takahito Kawanishi|AUTHOR Takahito Kawanishi]], [[Kunio Kashino|AUTHOR Kunio Kashino]], [[David Harwath|AUTHOR David Harwath]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-1-8|PAPER Tue-1-1-8 — Self-Supervised Representations Improve End-to-End Speech Translation]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Representations Improve End-to-End Speech Translation</div><div class="cpsessionviewpaperauthor">[[Anne Wu|AUTHOR Anne Wu]], [[Changhan Wang|AUTHOR Changhan Wang]], [[Juan Pino|AUTHOR Juan Pino]], [[Jiatao Gu|AUTHOR Jiatao Gu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Marcin Włodarczak|
|^&nbsp;|^Hongwei Ding|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2216.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-1|PAPER Tue-1-10-1 — Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison]]</div>|<div class="cpsessionviewpapertitle">Correlating Cepstra with Formant Frequencies: Implications for Phonetically-Informed Forensic Voice Comparison</div><div class="cpsessionviewpaperauthor">[[Vincent Hughes|AUTHOR Vincent Hughes]], [[Frantz Clermont|AUTHOR Frantz Clermont]], [[Philip Harrison|AUTHOR Philip Harrison]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1607.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-2|PAPER Tue-1-10-2 — Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese]]</div>|<div class="cpsessionviewpapertitle">Prosody and Breathing: A Comparison Between Rhetorical and Information-Seeking Questions in German and Brazilian Portuguese</div><div class="cpsessionviewpaperauthor">[[Jana Neitsch|AUTHOR Jana Neitsch]], [[Plinio A. Barbosa|AUTHOR Plinio A. Barbosa]], [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2101.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-3|PAPER Tue-1-10-3 — Scaling Processes of Clause Chains in Pitjantjatjara]]</div>|<div class="cpsessionviewpapertitle">Scaling Processes of Clause Chains in Pitjantjatjara</div><div class="cpsessionviewpaperauthor">[[Rebecca Defina|AUTHOR Rebecca Defina]], [[Catalina Torres|AUTHOR Catalina Torres]], [[Hywel Stoakes|AUTHOR Hywel Stoakes]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-4|PAPER Tue-1-10-4 — Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements]]</div>|<div class="cpsessionviewpapertitle">Neutralization of Voicing Distinction of Stops in Tohoku Dialects of Japanese: Field Work and Acoustic Measurements</div><div class="cpsessionviewpaperauthor">[[Ai Mizoguchi|AUTHOR Ai Mizoguchi]], [[Ayako Hashimoto|AUTHOR Ayako Hashimoto]], [[Sanae Matsui|AUTHOR Sanae Matsui]], [[Setsuko Imatomi|AUTHOR Setsuko Imatomi]], [[Ryunosuke Kobayashi|AUTHOR Ryunosuke Kobayashi]], [[Mafuyu Kitahara|AUTHOR Mafuyu Kitahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2204.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-5|PAPER Tue-1-10-5 — Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English]]</div>|<div class="cpsessionviewpapertitle">Correlation Between Prosody and Pragmatics: Case Study of Discourse Markers in French and English</div><div class="cpsessionviewpaperauthor">[[Lou Lee|AUTHOR Lou Lee]], [[Denis Jouvet|AUTHOR Denis Jouvet]], [[Katarina Bartkova|AUTHOR Katarina Bartkova]], [[Yvon Keromnes|AUTHOR Yvon Keromnes]], [[Mathilde Dargnat|AUTHOR Mathilde Dargnat]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2322.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-6|PAPER Tue-1-10-6 — An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic]]</div>|<div class="cpsessionviewpapertitle">An Analysis of Prosodic Prominence Cues to Information Structure in Egyptian Arabic</div><div class="cpsessionviewpaperauthor">[[Dina El Zarka|AUTHOR Dina El Zarka]], [[Anneliese Kelterer|AUTHOR Anneliese Kelterer]], [[Barbara Schuppler|AUTHOR Barbara Schuppler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2942.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-7|PAPER Tue-1-10-7 — Lexical Stress in Urdu]]</div>|<div class="cpsessionviewpapertitle">Lexical Stress in Urdu</div><div class="cpsessionviewpaperauthor">[[Benazir Mumtaz|AUTHOR Benazir Mumtaz]], [[Tina Bögel|AUTHOR Tina Bögel]], [[Miriam Butt|AUTHOR Miriam Butt]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1057.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-8|PAPER Tue-1-10-8 — Vocal Markers from Sustained Phonation in Huntington’s Disease]]</div>|<div class="cpsessionviewpapertitle">Vocal Markers from Sustained Phonation in Huntington’s Disease</div><div class="cpsessionviewpaperauthor">[[Rachid Riad|AUTHOR Rachid Riad]], [[Hadrien Titeux|AUTHOR Hadrien Titeux]], [[Laurie Lemoine|AUTHOR Laurie Lemoine]], [[Justine Montillot|AUTHOR Justine Montillot]], [[Jennifer Hamet Bagnou|AUTHOR Jennifer Hamet Bagnou]], [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]], [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]], [[Anne-Catherine Bachoud-Lévi|AUTHOR Anne-Catherine Bachoud-Lévi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-10-9|PAPER Tue-1-10-9 — How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech]]</div>|<div class="cpsessionviewpapertitle">How Rhythm and Timbre Encode Mooré Language in Bendré Drummed Speech</div><div class="cpsessionviewpaperauthor">[[Laure Dentel|AUTHOR Laure Dentel]], [[Julien Meyer|AUTHOR Julien Meyer]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Yanhua Long|
|^&nbsp;|^Wenju Liu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-1|PAPER Tue-1-2-1 — Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms]]</div>|<div class="cpsessionviewpapertitle">Improved RawNet with Feature Map Scaling for Text-Independent Speaker Verification Using Raw Waveforms</div><div class="cpsessionviewpaperauthor">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-2|PAPER Tue-1-2-2 — Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances]]</div>|<div class="cpsessionviewpapertitle">Improving Multi-Scale Aggregation Using Feature Pyramid Module for Robust Speaker Verification of Variable-Duration Utterances</div><div class="cpsessionviewpaperauthor">[[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Seong Min Kye|AUTHOR Seong Min Kye]], [[Yeunju Choi|AUTHOR Yeunju Choi]], [[Myunghun Jung|AUTHOR Myunghun Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-3|PAPER Tue-1-2-3 — An Adaptive X-Vector Model for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">An Adaptive X-Vector Model for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Bin Gu|AUTHOR Bin Gu]], [[Wu Guo|AUTHOR Wu Guo]], [[Fenglin Ding|AUTHOR Fenglin Ding]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1402.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-4|PAPER Tue-1-2-4 — Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions]]</div>|<div class="cpsessionviewpapertitle">Shouted Speech Compensation for Speaker Verification Robust to Vocal Effort Conditions</div><div class="cpsessionviewpaperauthor">[[Santi Prieto|AUTHOR Santi Prieto]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Iván López-Espejo|AUTHOR Iván López-Espejo]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1501.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-5|PAPER Tue-1-2-5 — Sum-Product Networks for Robust Automatic Speaker Identification]]</div>|<div class="cpsessionviewpapertitle">Sum-Product Networks for Robust Automatic Speaker Identification</div><div class="cpsessionviewpaperauthor">[[Aaron Nicolson|AUTHOR Aaron Nicolson]], [[Kuldip K. Paliwal|AUTHOR Kuldip K. Paliwal]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1564.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-6|PAPER Tue-1-2-6 — Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms]]</div>|<div class="cpsessionviewpapertitle">Segment Aggregation for Short Utterances Speaker Verification Using Raw Waveforms</div><div class="cpsessionviewpaperauthor">[[Seung-bin Kim|AUTHOR Seung-bin Kim]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1742.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-7|PAPER Tue-1-2-7 — Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Siamese X-Vector Reconstruction for Domain Adapted Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Shai Rozenberg|AUTHOR Shai Rozenberg]], [[Hagai Aronowitz|AUTHOR Hagai Aronowitz]], [[Ron Hoory|AUTHOR Ron Hoory]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1772.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-8|PAPER Tue-1-2-8 — Speaker Re-Identification with Speaker Dependent Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Speaker Re-Identification with Speaker Dependent Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yanpei Shi|AUTHOR Yanpei Shi]], [[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-9|PAPER Tue-1-2-9 — Blind Speech Signal Quality Estimation for Speaker Verification Systems]]</div>|<div class="cpsessionviewpapertitle">Blind Speech Signal Quality Estimation for Speaker Verification Systems</div><div class="cpsessionviewpaperauthor">[[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]], [[Marina Volkova|AUTHOR Marina Volkova]], [[Anastasia Avdeeva|AUTHOR Anastasia Avdeeva]], [[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Artem Gorlanov|AUTHOR Artem Gorlanov]], [[Tseren Andzhukaev|AUTHOR Tseren Andzhukaev]], [[Artem Ivanov|AUTHOR Artem Ivanov]], [[Alexander Kozlov|AUTHOR Alexander Kozlov]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2441.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-2-10|PAPER Tue-1-2-10 — Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Investigating Robustness of Adversarial Samples Detection for Automatic Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Xu Li|AUTHOR Xu Li]], [[Na Li|AUTHOR Na Li]], [[Jinghua Zhong|AUTHOR Jinghua Zhong]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Koji Inoue|
|^&nbsp;|^Tatsuya Kawahara|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-1|PAPER Tue-1-3-1 — Modeling ASR Ambiguity for Neural Dialogue State Tracking]]</div>|<div class="cpsessionviewpapertitle">Modeling ASR Ambiguity for Neural Dialogue State Tracking</div><div class="cpsessionviewpaperauthor">[[Vaishali Pal|AUTHOR Vaishali Pal]], [[Fabien Guillot|AUTHOR Fabien Guillot]], [[Manish Shrivastava|AUTHOR Manish Shrivastava]], [[Jean-Michel Renders|AUTHOR Jean-Michel Renders]], [[Laurent Besacier|AUTHOR Laurent Besacier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1753.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-2|PAPER Tue-1-3-2 — ASR Error Correction with Augmented Transformer for Entity Retrieval]]</div>|<div class="cpsessionviewpapertitle">ASR Error Correction with Augmented Transformer for Entity Retrieval</div><div class="cpsessionviewpaperauthor">[[Haoyu Wang|AUTHOR Haoyu Wang]], [[Shuyan Dong|AUTHOR Shuyan Dong]], [[Yue Liu|AUTHOR Yue Liu]], [[James Logan|AUTHOR James Logan]], [[Ashish Kumar Agrawal|AUTHOR Ashish Kumar Agrawal]], [[Yang Liu|AUTHOR Yang Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0059.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-3|PAPER Tue-1-3-3 — Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Transfer Learning for Low-Resource Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Xueli Jia|AUTHOR Xueli Jia]], [[Jianzong Wang|AUTHOR Jianzong Wang]], [[Zhiyong Zhang|AUTHOR Zhiyong Zhang]], [[Ning Cheng|AUTHOR Ning Cheng]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1676.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-4|PAPER Tue-1-3-4 — Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Data Balancing for Boosting Performance of Low-Frequency Classes in Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Judith Gaspers|AUTHOR Judith Gaspers]], [[Quynh Do|AUTHOR Quynh Do]], [[Fabian Triefenbach|AUTHOR Fabian Triefenbach]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2967.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-5|PAPER Tue-1-3-5 — An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System]]</div>|<div class="cpsessionviewpapertitle">An Interactive Adversarial Reward Learning-Based Spoken Language Understanding System</div><div class="cpsessionviewpaperauthor">[[Yu Wang|AUTHOR Yu Wang]], [[Yilin Shen|AUTHOR Yilin Shen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2907.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-6|PAPER Tue-1-3-6 — Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Style Attuned Pre-Training and Parameter Efficient Fine-Tuning for Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Jin Cao|AUTHOR Jin Cao]], [[Jun Wang|AUTHOR Jun Wang]], [[Wael Hamza|AUTHOR Wael Hamza]], [[Kelly Vanee|AUTHOR Kelly Vanee]], [[Shang-Wen Li|AUTHOR Shang-Wen Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2010.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-7|PAPER Tue-1-3-7 — Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Domain Adaptation for Dialogue Sequence Labeling Based on Hierarchical Adversarial Training</div><div class="cpsessionviewpaperauthor">[[Shota Orihashi|AUTHOR Shota Orihashi]], [[Mana Ihori|AUTHOR Mana Ihori]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1949.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-8|PAPER Tue-1-3-8 — Deep F-Measure Maximization for End-to-End Speech Understanding]]</div>|<div class="cpsessionviewpapertitle">Deep F-Measure Maximization for End-to-End Speech Understanding</div><div class="cpsessionviewpaperauthor">[[Leda Sarı|AUTHOR Leda Sarı]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-9|PAPER Tue-1-3-9 — An Effective Domain Adaptive Post-Training Method for BERT in Response Selection]]</div>|<div class="cpsessionviewpapertitle">An Effective Domain Adaptive Post-Training Method for BERT in Response Selection</div><div class="cpsessionviewpaperauthor">[[Taesun Whang|AUTHOR Taesun Whang]], [[Dongyub Lee|AUTHOR Dongyub Lee]], [[Chanhee Lee|AUTHOR Chanhee Lee]], [[Kisu Yang|AUTHOR Kisu Yang]], [[Dongsuk Oh|AUTHOR Dongsuk Oh]], [[Heuiseok Lim|AUTHOR Heuiseok Lim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2298.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-3-10|PAPER Tue-1-3-10 — Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Confidence Measure for Speech-to-Concept End-to-End Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Antoine Caubrière|AUTHOR Antoine Caubrière]], [[Yannick Estève|AUTHOR Yannick Estève]], [[Antoine Laurent|AUTHOR Antoine Laurent]], [[Emmanuel Morin|AUTHOR Emmanuel Morin]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Fanny Meunier|
|^&nbsp;|^Jiaming Xu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3042.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-1|PAPER Tue-1-4-1 — Attention to Indexical Information Improves Voice Recall]]</div>|<div class="cpsessionviewpapertitle">Attention to Indexical Information Improves Voice Recall</div><div class="cpsessionviewpaperauthor">[[Grant L. McGuire|AUTHOR Grant L. McGuire]], [[Molly Babel|AUTHOR Molly Babel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2683.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-2|PAPER Tue-1-4-2 — Categorization of Whistled Consonants by French Speakers]]</div>|<div class="cpsessionviewpapertitle">Categorization of Whistled Consonants by French Speakers</div><div class="cpsessionviewpaperauthor">[[Anaïs Tran Ngoc|AUTHOR Anaïs Tran Ngoc]], [[Julien Meyer|AUTHOR Julien Meyer]], [[Fanny Meunier|AUTHOR Fanny Meunier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2697.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-3|PAPER Tue-1-4-3 — Whistled Vowel Identification by French Listeners]]</div>|<div class="cpsessionviewpapertitle">Whistled Vowel Identification by French Listeners</div><div class="cpsessionviewpaperauthor">[[Anaïs Tran Ngoc|AUTHOR Anaïs Tran Ngoc]], [[Julien Meyer|AUTHOR Julien Meyer]], [[Fanny Meunier|AUTHOR Fanny Meunier]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2509.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-4|PAPER Tue-1-4-4 — F0 Slope and Mean: Cues to Speech Segmentation in French]]</div>|<div class="cpsessionviewpapertitle">F0 Slope and Mean: Cues to Speech Segmentation in French</div><div class="cpsessionviewpaperauthor">[[Maria del Mar Cordero|AUTHOR Maria del Mar Cordero]], [[Fanny Meunier|AUTHOR Fanny Meunier]], [[Nicolas Grimault|AUTHOR Nicolas Grimault]], [[Stéphane Pota|AUTHOR Stéphane Pota]], [[Elsa Spinelli|AUTHOR Elsa Spinelli]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1263.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-5|PAPER Tue-1-4-5 — Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?]]</div>|<div class="cpsessionviewpapertitle">Does French Listeners’ Ability to Use Accentual Information at the Word Level Depend on the Ear of Presentation?</div><div class="cpsessionviewpaperauthor">[[Amandine Michelas|AUTHOR Amandine Michelas]], [[Sophie Dufour|AUTHOR Sophie Dufour]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0056.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-6|PAPER Tue-1-4-6 — A Perceptual Study of the Five Level Tones in Hmu (Xinzhai Variety)]]</div>|<div class="cpsessionviewpapertitle">A Perceptual Study of the Five Level Tones in Hmu (Xinzhai Variety)</div><div class="cpsessionviewpaperauthor">[[Wen Liu|AUTHOR Wen Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2612.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-7|PAPER Tue-1-4-7 — Mandarin and English Adults’ Cue-Weighting of Lexical Stress]]</div>|<div class="cpsessionviewpapertitle">Mandarin and English Adults’ Cue-Weighting of Lexical Stress</div><div class="cpsessionviewpaperauthor">[[Zhen Zeng|AUTHOR Zhen Zeng]], [[Karen Mattock|AUTHOR Karen Mattock]], [[Liquan Liu|AUTHOR Liquan Liu]], [[Varghese Peter|AUTHOR Varghese Peter]], [[Alba Tuninetti|AUTHOR Alba Tuninetti]], [[Feng-Ming Tsao|AUTHOR Feng-Ming Tsao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2194.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-8|PAPER Tue-1-4-8 — Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors]]</div>|<div class="cpsessionviewpapertitle">Age-Related Differences of Tone Perception in Mandarin-Speaking Seniors</div><div class="cpsessionviewpaperauthor">[[Yan Feng|AUTHOR Yan Feng]], [[Gang Peng|AUTHOR Gang Peng]], [[William Shi-Yuan Wang|AUTHOR William Shi-Yuan Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1335.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-9|PAPER Tue-1-4-9 — Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors]]</div>|<div class="cpsessionviewpapertitle">Social and Functional Pressures in Vocal Alignment: Differences for Human and Voice-AI Interlocutors</div><div class="cpsessionviewpaperauthor">[[Georgia Zellou|AUTHOR Georgia Zellou]], [[Michelle Cohn|AUTHOR Michelle Cohn]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2637.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-4-10|PAPER Tue-1-4-10 — Identifying Important Time-Frequency Locations in Continuous Speech Utterances]]</div>|<div class="cpsessionviewpapertitle">Identifying Important Time-Frequency Locations in Continuous Speech Utterances</div><div class="cpsessionviewpaperauthor">[[Hassan Salami Kavaki|AUTHOR Hassan Salami Kavaki]], [[Michael I. Mandel|AUTHOR Michael I. Mandel]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Xiaodan Zhuang|
|^&nbsp;|^Tudor-Cătălin Zorilă|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-1|PAPER Tue-1-5-1 — Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling]]</div>|<div class="cpsessionviewpapertitle">Raw Sign and Magnitude Spectra for Multi-Head Acoustic Modelling</div><div class="cpsessionviewpaperauthor">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2301.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-2|PAPER Tue-1-5-2 — Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations]]</div>|<div class="cpsessionviewpapertitle">Robust Raw Waveform Speech Recognition Using Relevance Weighted Representations</div><div class="cpsessionviewpaperauthor">[[Purvi Agrawal|AUTHOR Purvi Agrawal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-3|PAPER Tue-1-5-3 — A Deep 2D Convolutional Network for Waveform-Based Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">A Deep 2D Convolutional Network for Waveform-Based Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Dino Oglic|AUTHOR Dino Oglic]], [[Zoran Cvetkovic|AUTHOR Zoran Cvetkovic]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1392.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-4|PAPER Tue-1-5-4 — Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions]]</div>|<div class="cpsessionviewpapertitle">Lightweight End-to-End Speech Recognition from Raw Audio Data Using Sinc-Convolutions</div><div class="cpsessionviewpaperauthor">[[Ludwig Kürzinger|AUTHOR Ludwig Kürzinger]], [[Nicolas Lindae|AUTHOR Nicolas Lindae]], [[Palle Klewitz|AUTHOR Palle Klewitz]], [[Gerhard Rigoll|AUTHOR Gerhard Rigoll]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-5|PAPER Tue-1-5-5 — An Alternative to MFCCs for ASR]]</div>|<div class="cpsessionviewpapertitle">An Alternative to MFCCs for ASR</div><div class="cpsessionviewpaperauthor">[[Pegah Ghahramani|AUTHOR Pegah Ghahramani]], [[Hossein Hadian|AUTHOR Hossein Hadian]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2258.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-6|PAPER Tue-1-5-6 — Phase Based Spectro-Temporal Features for Building a Robust ASR System]]</div>|<div class="cpsessionviewpapertitle">Phase Based Spectro-Temporal Features for Building a Robust ASR System</div><div class="cpsessionviewpaperauthor">[[Anirban Dutta|AUTHOR Anirban Dutta]], [[G. Ashishkumar|AUTHOR G. Ashishkumar]], [[Ch.V. Rama Rao|AUTHOR Ch.V. Rama Rao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2656.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-7|PAPER Tue-1-5-7 — Deep Scattering Power Spectrum Features for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Deep Scattering Power Spectrum Features for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Neethu M. Joy|AUTHOR Neethu M. Joy]], [[Dino Oglic|AUTHOR Dino Oglic]], [[Zoran Cvetkovic|AUTHOR Zoran Cvetkovic]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2102.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-8|PAPER Tue-1-5-8 — FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">FusionRNN: Shared Neural Parameters for Multi-Channel Distant Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Titouan Parcollet|AUTHOR Titouan Parcollet]], [[Xinchi Qiu|AUTHOR Xinchi Qiu]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2904.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-9|PAPER Tue-1-5-9 — Bandpass Noise Generation and Augmentation for Unified ASR]]</div>|<div class="cpsessionviewpapertitle">Bandpass Noise Generation and Augmentation for Unified ASR</div><div class="cpsessionviewpaperauthor">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Bo Ren|AUTHOR Bo Ren]], [[Yifan Gong|AUTHOR Yifan Gong]], [[Jian Wu|AUTHOR Jian Wu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-5-10|PAPER Tue-1-5-10 — Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Deep Learning Based Dereverberation of Temporal Envelopes for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Anurenjan Purushothaman|AUTHOR Anurenjan Purushothaman]], [[Anirudh Sreeram|AUTHOR Anirudh Sreeram]], [[Rohit Kumar|AUTHOR Rohit Kumar]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Ruibo Fu|
|^&nbsp;|^Takuma Okamoto|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1094.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-1|PAPER Tue-1-7-1 — g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset]]</div>|<div class="cpsessionviewpapertitle">g2pM: A Neural Grapheme-to-Phoneme Conversion Package for Mandarin Chinese Based on a New Open Benchmark Dataset</div><div class="cpsessionviewpaperauthor">[[Kyubyong Park|AUTHOR Kyubyong Park]], [[Seanie Lee|AUTHOR Seanie Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1142.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-2|PAPER Tue-1-7-2 — A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation]]</div>|<div class="cpsessionviewpapertitle">A Mask-Based Model for Mandarin Chinese Polyphone Disambiguation</div><div class="cpsessionviewpaperauthor">[[Haiteng Zhang|AUTHOR Haiteng Zhang]], [[Huashan Pan|AUTHOR Huashan Pan]], [[Xiulin Li|AUTHOR Xiulin Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1336.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-3|PAPER Tue-1-7-3 — Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes]]</div>|<div class="cpsessionviewpapertitle">Perception of Concatenative vs. Neural Text-To-Speech (TTS): Differences in Intelligibility in Noise and Language Attitudes</div><div class="cpsessionviewpaperauthor">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1547.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-4|PAPER Tue-1-7-4 — Enhancing Sequence-to-Sequence Text-to-Speech with Morphology]]</div>|<div class="cpsessionviewpapertitle">Enhancing Sequence-to-Sequence Text-to-Speech with Morphology</div><div class="cpsessionviewpaperauthor">[[Jason Taylor|AUTHOR Jason Taylor]], [[Korin Richmond|AUTHOR Korin Richmond]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2111.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-5|PAPER Tue-1-7-5 — Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling]]</div>|<div class="cpsessionviewpapertitle">Deep MOS Predictor for Synthetic Speech Using Cluster-Based Modeling</div><div class="cpsessionviewpaperauthor">[[Yeunju Choi|AUTHOR Yeunju Choi]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2382.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-6|PAPER Tue-1-7-6 — Deep Learning Based Assessment of Synthetic Speech Naturalness]]</div>|<div class="cpsessionviewpapertitle">Deep Learning Based Assessment of Synthetic Speech Naturalness</div><div class="cpsessionviewpaperauthor">[[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Sebastian Möller|AUTHOR Sebastian Möller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2427.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-7|PAPER Tue-1-7-7 — Distant Supervision for Polyphone Disambiguation in Mandarin Chinese]]</div>|<div class="cpsessionviewpapertitle">Distant Supervision for Polyphone Disambiguation in Mandarin Chinese</div><div class="cpsessionviewpaperauthor">[[Jiawen Zhang|AUTHOR Jiawen Zhang]], [[Yuanyuan Zhao|AUTHOR Yuanyuan Zhao]], [[Jiaqi Zhu|AUTHOR Jiaqi Zhu]], [[Jinba Xiao|AUTHOR Jinba Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2567.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-8|PAPER Tue-1-7-8 — An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets]]</div>|<div class="cpsessionviewpapertitle">An Unsupervised Method to Select a Speaker Subset from Large Multi-Speaker Speech Synthesis Datasets</div><div class="cpsessionviewpaperauthor">[[Pilar Oplustil Gallegos|AUTHOR Pilar Oplustil Gallegos]], [[Jennifer Williams|AUTHOR Jennifer Williams]], [[Joanna Rownicka|AUTHOR Joanna Rownicka]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2910.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-7-9|PAPER Tue-1-7-9 — Understanding the Effect of Voice Quality and Accent on Talker Similarity]]</div>|<div class="cpsessionviewpapertitle">Understanding the Effect of Voice Quality and Accent on Talker Similarity</div><div class="cpsessionviewpaperauthor">[[Anurag Das|AUTHOR Anurag Das]], [[Guanlong Zhao|AUTHOR Guanlong Zhao]], [[John Levis|AUTHOR John Levis]], [[Evgeny Chukharev-Hudilainen|AUTHOR Evgeny Chukharev-Hudilainen]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Ralf Schlüter|
|^&nbsp;|^Hermann Ney|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1958.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-1|PAPER Tue-1-8-1 — Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias]]</div>|<div class="cpsessionviewpapertitle">Robust Beam Search for Encoder-Decoder Attention Based Speech Recognition Without Length Bias</div><div class="cpsessionviewpaperauthor">[[Wei Zhou|AUTHOR Wei Zhou]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2677.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-2|PAPER Tue-1-8-2 — Transformer with Bidirectional Decoder for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Transformer with Bidirectional Decoder for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xi Chen|AUTHOR Xi Chen]], [[Songyang Zhang|AUTHOR Songyang Zhang]], [[Dandan Song|AUTHOR Dandan Song]], [[Peng Ouyang|AUTHOR Peng Ouyang]], [[Shouyi Yin|AUTHOR Shouyi Yin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1873.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-3|PAPER Tue-1-8-3 — An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">An Investigation of Phone-Based Subword Units for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Weiran Wang|AUTHOR Weiran Wang]], [[Guangsen Wang|AUTHOR Guangsen Wang]], [[Aadyot Bhatnagar|AUTHOR Aadyot Bhatnagar]], [[Yingbo Zhou|AUTHOR Yingbo Zhou]], [[Caiming Xiong|AUTHOR Caiming Xiong]], [[Richard Socher|AUTHOR Richard Socher]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2141.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-4|PAPER Tue-1-8-4 — Combination of End-to-End and Hybrid Models for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Combination of End-to-End and Hybrid Models for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jeremy H.M. Wong|AUTHOR Jeremy H.M. Wong]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Rui Zhao|AUTHOR Rui Zhao]], [[Liang Lu|AUTHOR Liang Lu]], [[Eric Sun|AUTHOR Eric Sun]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1233.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-5|PAPER Tue-1-8-5 — Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Evolved Speech-Transformer: Applying Neural Architecture Search to End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jihwan Kim|AUTHOR Jihwan Kim]], [[Jisung Wang|AUTHOR Jisung Wang]], [[Sangki Kim|AUTHOR Sangki Kim]], [[Yeha Lee|AUTHOR Yeha Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3174.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-6|PAPER Tue-1-8-6 — Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Hierarchical Multi-Stage Word-to-Grapheme Named Entity Corrector for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Abhinav Garg|AUTHOR Abhinav Garg]], [[Ashutosh Gupta|AUTHOR Ashutosh Gupta]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Shatrughan Singh|AUTHOR Shatrughan Singh]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1164.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-7|PAPER Tue-1-8-7 — LVCSR with Transformer Language Models]]</div>|<div class="cpsessionviewpapertitle">LVCSR with Transformer Language Models</div><div class="cpsessionviewpaperauthor">[[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1315.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-8-8|PAPER Tue-1-8-8 — DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation]]</div>|<div class="cpsessionviewpapertitle">DARTS-ASR: Differentiable Architecture Search for Multilingual Speech Recognition and Adaptation</div><div class="cpsessionviewpaperauthor">[[Yi-Chen Chen|AUTHOR Yi-Chen Chen]], [[Jui-Yang Hsu|AUTHOR Jui-Yang Hsu]], [[Cheng-Kuang Lee|AUTHOR Cheng-Kuang Lee]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Chi-Chun Lee|
|^&nbsp;|^Chengwei Huang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2862.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-1|PAPER Tue-1-9-1 — Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus]]</div>|<div class="cpsessionviewpapertitle">Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus</div><div class="cpsessionviewpaperauthor">[[Lukas Stappen|AUTHOR Lukas Stappen]], [[Georgios Rizos|AUTHOR Georgios Rizos]], [[Madina Hasan|AUTHOR Madina Hasan]], [[Thomas Hain|AUTHOR Thomas Hain]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1339.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-2|PAPER Tue-1-9-2 — Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits]]</div>|<div class="cpsessionviewpapertitle">Individual Variation in Language Attitudes Toward Voice-AI: The Role of Listeners’ Autistic-Like Traits</div><div class="cpsessionviewpaperauthor">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Melina Sarian|AUTHOR Melina Sarian]], [[Kristin Predeck|AUTHOR Kristin Predeck]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1938.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-3|PAPER Tue-1-9-3 — Differences in Gradient Emotion Perception: Human vs. Alexa Voices]]</div>|<div class="cpsessionviewpapertitle">Differences in Gradient Emotion Perception: Human vs. Alexa Voices</div><div class="cpsessionviewpaperauthor">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Eran Raveh|AUTHOR Eran Raveh]], [[Kristin Predeck|AUTHOR Kristin Predeck]], [[Iona Gessinger|AUTHOR Iona Gessinger]], [[Bernd Möbius|AUTHOR Bernd Möbius]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2444.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-4|PAPER Tue-1-9-4 — The MSP-Conversation Corpus]]</div>|<div class="cpsessionviewpapertitle">The MSP-Conversation Corpus</div><div class="cpsessionviewpaperauthor">[[Luz Martinez-Lucas|AUTHOR Luz Martinez-Lucas]], [[Mohammed Abdelwahab|AUTHOR Mohammed Abdelwahab]], [[Carlos Busso|AUTHOR Carlos Busso]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2888.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-5|PAPER Tue-1-9-5 — Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing]]</div>|<div class="cpsessionviewpapertitle">Spotting the Traces of Depression in Read Speech: An Approach Based on Computational Paralinguistics and Social Signal Processing</div><div class="cpsessionviewpaperauthor">[[Fuxiang Tao|AUTHOR Fuxiang Tao]], [[Anna Esposito|AUTHOR Anna Esposito]], [[Alessandro Vinciarelli|AUTHOR Alessandro Vinciarelli]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2890.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-6|PAPER Tue-1-9-6 — Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations]]</div>|<div class="cpsessionviewpapertitle">Speech Sentiment and Customer Satisfaction Estimation in Socialbot Conversations</div><div class="cpsessionviewpaperauthor">[[Yelin Kim|AUTHOR Yelin Kim]], [[Joshua Levy|AUTHOR Joshua Levy]], [[Yang Liu|AUTHOR Yang Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2964.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-7|PAPER Tue-1-9-7 — Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments]]</div>|<div class="cpsessionviewpapertitle">Pardon the Interruption: An Analysis of Gender and Turn-Taking in U.S. Supreme Court Oral Arguments</div><div class="cpsessionviewpaperauthor">[[Haley Lepp|AUTHOR Haley Lepp]], [[Gina-Anne Levow|AUTHOR Gina-Anne Levow]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1611.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-8|PAPER Tue-1-9-8 — Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules]]</div>|<div class="cpsessionviewpapertitle">Are Germans Better Haters Than Danes? Language-Specific Implicit Prosodies of Types of Hate Speech and How They Relate to Perceived Severity and Societal Rules</div><div class="cpsessionviewpaperauthor">[[Jana Neitsch|AUTHOR Jana Neitsch]], [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1627.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-9|PAPER Tue-1-9-9 — An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures]]</div>|<div class="cpsessionviewpapertitle">An Objective Voice Gender Scoring System and Identification of the Salient Acoustic Measures</div><div class="cpsessionviewpaperauthor">[[Fuling Chen|AUTHOR Fuling Chen]], [[Roberto Togneri|AUTHOR Roberto Togneri]], [[Murray Maybery|AUTHOR Murray Maybery]], [[Diana Tan|AUTHOR Diana Tan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-1-9-10|PAPER Tue-1-9-10 — How Ordinal Are Your Data?]]</div>|<div class="cpsessionviewpapertitle">How Ordinal Are Your Data?</div><div class="cpsessionviewpaperauthor">[[Sadari Jayawardena|AUTHOR Sadari Jayawardena]], [[Julien Epps|AUTHOR Julien Epps]], [[Zhaocheng Huang|AUTHOR Zhaocheng Huang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Tuesday 27 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Natalia Tomashenko|
|^&nbsp;|^Emmanuel Vincent|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1333.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-1|PAPER Tue-SS-1-6-1 — Introducing the VoicePrivacy Initiative]]</div>|<div class="cpsessionviewpapertitle">Introducing the VoicePrivacy Initiative</div><div class="cpsessionviewpaperauthor">[[N. Tomashenko|AUTHOR N. Tomashenko]], [[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]], [[Xin Wang|AUTHOR Xin Wang]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Nicholas Evans|AUTHOR Nicholas Evans]], [[Jose Patino|AUTHOR Jose Patino]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]], [[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1815.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-2|PAPER Tue-SS-1-6-2 — The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment]]</div>|<div class="cpsessionviewpapertitle">The Privacy ZEBRA: Zero Evidence Biometric Recognition Assessment</div><div class="cpsessionviewpaperauthor">[[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Jose Patino|AUTHOR Jose Patino]], [[N. Tomashenko|AUTHOR N. Tomashenko]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]], [[Nicholas Evans|AUTHOR Nicholas Evans]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1887.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-3|PAPER Tue-SS-1-6-3 — X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System]]</div>|<div class="cpsessionviewpapertitle">X-Vector Singular Value Modification and Statistical-Based Decomposition with Ensemble Regression Modeling for Speaker Anonymization System</div><div class="cpsessionviewpaperauthor">[[Candy Olivia Mawalim|AUTHOR Candy Olivia Mawalim]], [[Kasorn Galajit|AUTHOR Kasorn Galajit]], [[Jessada Karnjana|AUTHOR Jessada Karnjana]], [[Masashi Unoki|AUTHOR Masashi Unoki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2248.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-4|PAPER Tue-SS-1-6-4 — A Comparative Study of Speech Anonymization Metrics]]</div>|<div class="cpsessionviewpapertitle">A Comparative Study of Speech Anonymization Metrics</div><div class="cpsessionviewpaperauthor">[[Mohamed Maouche|AUTHOR Mohamed Maouche]], [[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]], [[Nathalie Vauquier|AUTHOR Nathalie Vauquier]], [[Aurélien Bellet|AUTHOR Aurélien Bellet]], [[Marc Tommasi|AUTHOR Marc Tommasi]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-5|PAPER Tue-SS-1-6-5 — Design Choices for X-Vector Based Speaker Anonymization]]</div>|<div class="cpsessionviewpapertitle">Design Choices for X-Vector Based Speaker Anonymization</div><div class="cpsessionviewpaperauthor">[[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]], [[N. Tomashenko|AUTHOR N. Tomashenko]], [[Xin Wang|AUTHOR Xin Wang]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Mohamed Maouche|AUTHOR Mohamed Maouche]], [[Aurélien Bellet|AUTHOR Aurélien Bellet]], [[Marc Tommasi|AUTHOR Marc Tommasi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2720.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-1-6-6|PAPER Tue-SS-1-6-6 — Speech Pseudonymisation Assessment Using Voice Similarity Matrices]]</div>|<div class="cpsessionviewpapertitle">Speech Pseudonymisation Assessment Using Voice Similarity Matrices</div><div class="cpsessionviewpaperauthor">[[Paul-Gauthier Noé|AUTHOR Paul-Gauthier Noé]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]], [[Driss Matrouf|AUTHOR Driss Matrouf]], [[N. Tomashenko|AUTHOR N. Tomashenko]], [[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Nicholas Evans|AUTHOR Nicholas Evans]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Wentao Gu|
|^&nbsp;|^Mariapaola D’Imperio|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2695.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-1|PAPER Wed-1-1-1 — Interaction of Tone and Voicing in Mizo]]</div>|<div class="cpsessionviewpapertitle">Interaction of Tone and Voicing in Mizo</div><div class="cpsessionviewpaperauthor">[[Wendy Lalhminghlui|AUTHOR Wendy Lalhminghlui]], [[Priyankoo Sarmah|AUTHOR Priyankoo Sarmah]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1614.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-2|PAPER Wed-1-1-2 — Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration]]</div>|<div class="cpsessionviewpapertitle">Mandarin Lexical Tones: A Corpus-Based Study of Word Length, Syllable Position and Prosodic Position on Duration</div><div class="cpsessionviewpaperauthor">[[Yaru Wu|AUTHOR Yaru Wu]], [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]], [[Lori Lamel|AUTHOR Lori Lamel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2823.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-3|PAPER Wed-1-1-3 — An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech]]</div>|<div class="cpsessionviewpapertitle">An Investigation of the Target Approximation Model for Tone Modeling and Recognition in Continuous Mandarin Speech</div><div class="cpsessionviewpaperauthor">[[Yingming Gao|AUTHOR Yingming Gao]], [[Xinyu Zhang|AUTHOR Xinyu Zhang]], [[Yi Xu|AUTHOR Yi Xu]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]], [[Peter Birkholz|AUTHOR Peter Birkholz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2073.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-4|PAPER Wed-1-1-4 — Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity]]</div>|<div class="cpsessionviewpapertitle">Integrating the Application and Realization of Mandarin 3rd Tone Sandhi in the Resolution of Sentence Ambiguity</div><div class="cpsessionviewpaperauthor">[[Wei Lai|AUTHOR Wei Lai]], [[Aini Li|AUTHOR Aini Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1257.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-5|PAPER Wed-1-1-5 — Neutral Tone in Changde Mandarin]]</div>|<div class="cpsessionviewpapertitle">Neutral Tone in Changde Mandarin</div><div class="cpsessionviewpaperauthor">[[Zhenrui Zhang|AUTHOR Zhenrui Zhang]], [[Fang Hu|AUTHOR Fang Hu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1987.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-6|PAPER Wed-1-1-6 — Pitch Declination and Final Lowering in Northeastern Mandarin]]</div>|<div class="cpsessionviewpapertitle">Pitch Declination and Final Lowering in Northeastern Mandarin</div><div class="cpsessionviewpaperauthor">[[Ping Cui|AUTHOR Ping Cui]], [[Jianjing Kuang|AUTHOR Jianjing Kuang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1954.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-7|PAPER Wed-1-1-7 — Variation in Spectral Slope and Interharmonic Noise in Cantonese Tones]]</div>|<div class="cpsessionviewpapertitle">Variation in Spectral Slope and Interharmonic Noise in Cantonese Tones</div><div class="cpsessionviewpaperauthor">[[Phil Rose|AUTHOR Phil Rose]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1274.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-1-8|PAPER Wed-1-1-8 — The Acoustic Realization of Mandarin Tones in Fast Speech]]</div>|<div class="cpsessionviewpapertitle">The Acoustic Realization of Mandarin Tones in Fast Speech</div><div class="cpsessionviewpaperauthor">[[Ping Tang|AUTHOR Ping Tang]], [[Shanpeng Li|AUTHOR Shanpeng Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Jian Gong|
|^&nbsp;|^Shan Liang|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1023.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-1|PAPER Wed-1-10-1 — The Effect of Language Proficiency on the Perception of Segmental Foreign Accent]]</div>|<div class="cpsessionviewpapertitle">The Effect of Language Proficiency on the Perception of Segmental Foreign Accent</div><div class="cpsessionviewpaperauthor">[[Rubén Pérez-Ramón|AUTHOR Rubén Pérez-Ramón]], [[María Luisa García Lecumberri|AUTHOR María Luisa García Lecumberri]], [[Martin Cooke|AUTHOR Martin Cooke]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1678.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-2|PAPER Wed-1-10-2 — The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers]]</div>|<div class="cpsessionviewpapertitle">The Effect of Language Dominance on the Selective Attention of Segments and Tones in Urdu-Cantonese Speakers</div><div class="cpsessionviewpaperauthor">[[Yi Liu|AUTHOR Yi Liu]], [[Jinghong Ning|AUTHOR Jinghong Ning]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2595.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-3|PAPER Wed-1-10-3 — The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China]]</div>|<div class="cpsessionviewpapertitle">The Effect of Input on the Production of English Tense and Lax Vowels by Chinese Learners: Evidence from an Elementary School in China</div><div class="cpsessionviewpaperauthor">[[Mengrou Li|AUTHOR Mengrou Li]], [[Ying Chen|AUTHOR Ying Chen]], [[Jie Cui|AUTHOR Jie Cui]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2783.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-4|PAPER Wed-1-10-4 — Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers]]</div>|<div class="cpsessionviewpapertitle">Exploring the Use of an Artificial Accent of English to Assess Phonetic Learning in Monolingual and Bilingual Speakers</div><div class="cpsessionviewpaperauthor">[[Laura Spinu|AUTHOR Laura Spinu]], [[Jiwon Hwang|AUTHOR Jiwon Hwang]], [[Nadya Pincus|AUTHOR Nadya Pincus]], [[Mariana Vasilita|AUTHOR Mariana Vasilita]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2271.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-5|PAPER Wed-1-10-5 — Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech]]</div>|<div class="cpsessionviewpapertitle">Effects of Dialectal Code-Switching on Speech Modules: A Study Using Egyptian Arabic Broadcast Speech</div><div class="cpsessionviewpaperauthor">[[Shammur A. Chowdhury|AUTHOR Shammur A. Chowdhury]], [[Younes Samih|AUTHOR Younes Samih]], [[Mohamed Eldesouki|AUTHOR Mohamed Eldesouki]], [[Ahmed Ali|AUTHOR Ahmed Ali]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3095.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-6|PAPER Wed-1-10-6 — Bilingual Acoustic Voice Variation is Similarly Structured Across Languages]]</div>|<div class="cpsessionviewpapertitle">Bilingual Acoustic Voice Variation is Similarly Structured Across Languages</div><div class="cpsessionviewpaperauthor">[[Khia A. Johnson|AUTHOR Khia A. Johnson]], [[Molly Babel|AUTHOR Molly Babel]], [[Robert A. Fuhrman|AUTHOR Robert A. Fuhrman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1582.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-7|PAPER Wed-1-10-7 — Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Monolingual Data Selection Analysis for English-Mandarin Hybrid Code-Switching Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Haobo Zhang|AUTHOR Haobo Zhang]], [[Haihua Xu|AUTHOR Haihua Xu]], [[Van Tung Pham|AUTHOR Van Tung Pham]], [[Hao Huang|AUTHOR Hao Huang]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-8|PAPER Wed-1-10-8 — Perception and Production of Mandarin Initial Stops by Native Urdu Speakers]]</div>|<div class="cpsessionviewpapertitle">Perception and Production of Mandarin Initial Stops by Native Urdu Speakers</div><div class="cpsessionviewpaperauthor">[[Dan Du|AUTHOR Dan Du]], [[Xianjin Zhu|AUTHOR Xianjin Zhu]], [[Zhu Li|AUTHOR Zhu Li]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2921.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-9|PAPER Wed-1-10-9 — Now You’re Speaking My Language: Visual Language Identification]]</div>|<div class="cpsessionviewpapertitle">Now You’re Speaking My Language: Visual Language Identification</div><div class="cpsessionviewpaperauthor">[[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]], [[Joon Son Chung|AUTHOR Joon Son Chung]], [[Andrew Zisserman|AUTHOR Andrew Zisserman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1685.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-10-10|PAPER Wed-1-10-10 — The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones]]</div>|<div class="cpsessionviewpapertitle">The Different Enhancement Roles of Covarying Cues in Thai and Mandarin Tones</div><div class="cpsessionviewpaperauthor">[[Nari Rhee|AUTHOR Nari Rhee]], [[Jianjing Kuang|AUTHOR Jianjing Kuang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 11|<|
|^Chairs:&nbsp;|^David Looney|
|^&nbsp;|^Sharon Gannot|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1043.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-1|PAPER Wed-1-11-1 — Singing Voice Extraction with Attention-Based Spectrograms Fusion]]</div>|<div class="cpsessionviewpapertitle">Singing Voice Extraction with Attention-Based Spectrograms Fusion</div><div class="cpsessionviewpaperauthor">[[Hao Shi|AUTHOR Hao Shi]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Sheng Li|AUTHOR Sheng Li]], [[Chenchen Ding|AUTHOR Chenchen Ding]], [[Meng Ge|AUTHOR Meng Ge]], [[Nan Li|AUTHOR Nan Li]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Hiroshi Seki|AUTHOR Hiroshi Seki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1400.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-2|PAPER Wed-1-11-2 — Incorporating Broad Phonetic Information for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Incorporating Broad Phonetic Information for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yen-Ju Lu|AUTHOR Yen-Ju Lu]], [[Chien-Feng Liao|AUTHOR Chien-Feng Liao]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Jeih-weih Hung|AUTHOR Jeih-weih Hung]], [[Yu Tsao|AUTHOR Yu Tsao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1513.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-3|PAPER Wed-1-11-3 — A Recursive Network with Dynamic Attention for Monaural Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Recursive Network with Dynamic Attention for Monaural Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Andong Li|AUTHOR Andong Li]], [[Chengshi Zheng|AUTHOR Chengshi Zheng]], [[Cunhang Fan|AUTHOR Cunhang Fan]], [[Renhua Peng|AUTHOR Renhua Peng]], [[Xiaodong Li|AUTHOR Xiaodong Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-4|PAPER Wed-1-11-4 — Constrained Ratio Mask for Speech Enhancement Using DNN]]</div>|<div class="cpsessionviewpapertitle">Constrained Ratio Mask for Speech Enhancement Using DNN</div><div class="cpsessionviewpaperauthor">[[Hongjiang Yu|AUTHOR Hongjiang Yu]], [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]], [[Yuhong Yang|AUTHOR Yuhong Yang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2213.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-5|PAPER Wed-1-11-5 — SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning]]</div>|<div class="cpsessionviewpapertitle">SERIL: Noise Adaptive Speech Enhancement Using Regularization-Based Incremental Learning</div><div class="cpsessionviewpaperauthor">[[Chi-Chang Lee|AUTHOR Chi-Chang Lee]], [[Yu-Chen Lin|AUTHOR Yu-Chen Lin]], [[Hsuan-Tien Lin|AUTHOR Hsuan-Tien Lin]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]], [[Yu Tsao|AUTHOR Yu Tsao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2291.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-6|PAPER Wed-1-11-6 — Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Adaptive Neural Speech Enhancement with a Denoising Variational Autoencoder</div><div class="cpsessionviewpaperauthor">[[Yoshiaki Bando|AUTHOR Yoshiaki Bando]], [[Kouhei Sekiguchi|AUTHOR Kouhei Sekiguchi]], [[Kazuyoshi Yoshii|AUTHOR Kazuyoshi Yoshii]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2421.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-7|PAPER Wed-1-11-7 — Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Low-Latency Single Channel Speech Dereverberation Using U-Net Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Ahmet E. Bulut|AUTHOR Ahmet E. Bulut]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2982.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-8|PAPER Wed-1-11-8 — Single-Channel Speech Enhancement by Subspace Affinity Minimization]]</div>|<div class="cpsessionviewpapertitle">Single-Channel Speech Enhancement by Subspace Affinity Minimization</div><div class="cpsessionviewpaperauthor">[[Dung N. Tran|AUTHOR Dung N. Tran]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1030.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-9|PAPER Wed-1-11-9 — Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Noise Tokens: Learning Neural Noise Templates for Environment-Aware Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Haoyu Li|AUTHOR Haoyu Li]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1133.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-11-10|PAPER Wed-1-11-10 — NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">NAAGN: Noise-Aware Attention-Gated Network for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Feng Deng|AUTHOR Feng Deng]], [[Tao Jiang|AUTHOR Tao Jiang]], [[Xiao-Rui Wang|AUTHOR Xiao-Rui Wang]], [[Chen Zhang|AUTHOR Chen Zhang]], [[Yan Li|AUTHOR Yan Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Sabato Marco Siniscalchi|
|^&nbsp;|^Qin Jin|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1264.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-1|PAPER Wed-1-2-1 — Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency]]</div>|<div class="cpsessionviewpapertitle">Do Face Masks Introduce Bias in Speech Technologies? The Case of Automated Scoring of Speaking Proficiency</div><div class="cpsessionviewpaperauthor">[[Anastassia Loukina|AUTHOR Anastassia Loukina]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Matthew Mulholland|AUTHOR Matthew Mulholland]], [[Ian Blood|AUTHOR Ian Blood]], [[Klaus Zechner|AUTHOR Klaus Zechner]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1449.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-2|PAPER Wed-1-2-2 — A Low Latency ASR-Free End to End Spoken Language Understanding System]]</div>|<div class="cpsessionviewpapertitle">A Low Latency ASR-Free End to End Spoken Language Understanding System</div><div class="cpsessionviewpaperauthor">[[Mohamed Mhiri|AUTHOR Mohamed Mhiri]], [[Samuel Myer|AUTHOR Samuel Myer]], [[Vikrant Singh Tomar|AUTHOR Vikrant Singh Tomar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1843.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-3|PAPER Wed-1-2-3 — An Audio-Based Wakeword-Independent Verification System]]</div>|<div class="cpsessionviewpapertitle">An Audio-Based Wakeword-Independent Verification System</div><div class="cpsessionviewpaperauthor">[[Joe Wang|AUTHOR Joe Wang]], [[Rajath Kumar|AUTHOR Rajath Kumar]], [[Mike Rodehorst|AUTHOR Mike Rodehorst]], [[Brian Kulis|AUTHOR Brian Kulis]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1878.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-4|PAPER Wed-1-2-4 — Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination]]</div>|<div class="cpsessionviewpapertitle">Learnable Spectro-Temporal Receptive Fields for Robust Voice Type Discrimination</div><div class="cpsessionviewpaperauthor">[[Tyler Vuong|AUTHOR Tyler Vuong]], [[Yangyang Xia|AUTHOR Yangyang Xia]], [[Richard M. Stern|AUTHOR Richard M. Stern]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-5|PAPER Wed-1-2-5 — Low Latency Speech Recognition Using End-to-End Prefetching]]</div>|<div class="cpsessionviewpapertitle">Low Latency Speech Recognition Using End-to-End Prefetching</div><div class="cpsessionviewpaperauthor">[[Shuo-Yiin Chang|AUTHOR Shuo-Yiin Chang]], [[Bo Li|AUTHOR Bo Li]], [[David Rybach|AUTHOR David Rybach]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Wei Li|AUTHOR Wei Li]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Trevor Strohman|AUTHOR Trevor Strohman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1986.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-6|PAPER Wed-1-2-6 — AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification]]</div>|<div class="cpsessionviewpapertitle">AutoSpeech 2020: The Second Automated Machine Learning Challenge for Speech Classification</div><div class="cpsessionviewpaperauthor">[[Jingsong Wang|AUTHOR Jingsong Wang]], [[Tom Ko|AUTHOR Tom Ko]], [[Zhen Xu|AUTHOR Zhen Xu]], [[Xiawei Guo|AUTHOR Xiawei Guo]], [[Souxiang Liu|AUTHOR Souxiang Liu]], [[Wei-Wei Tu|AUTHOR Wei-Wei Tu]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-7|PAPER Wed-1-2-7 — Building a Robust Word-Level Wakeword Verification Network]]</div>|<div class="cpsessionviewpapertitle">Building a Robust Word-Level Wakeword Verification Network</div><div class="cpsessionviewpaperauthor">[[Rajath Kumar|AUTHOR Rajath Kumar]], [[Mike Rodehorst|AUTHOR Mike Rodehorst]], [[Joe Wang|AUTHOR Joe Wang]], [[Jiacheng Gu|AUTHOR Jiacheng Gu]], [[Brian Kulis|AUTHOR Brian Kulis]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-8|PAPER Wed-1-2-8 — A Transformer-Based Audio Captioning Model with Keyword Estimation]]</div>|<div class="cpsessionviewpapertitle">A Transformer-Based Audio Captioning Model with Keyword Estimation</div><div class="cpsessionviewpaperauthor">[[Yuma Koizumi|AUTHOR Yuma Koizumi]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Kyosuke Nishida|AUTHOR Kyosuke Nishida]], [[Masahiro Yasuda|AUTHOR Masahiro Yasuda]], [[Shoichiro Saito|AUTHOR Shoichiro Saito]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-9|PAPER Wed-1-2-9 — Neural Architecture Search for Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Neural Architecture Search for Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Tong Mo|AUTHOR Tong Mo]], [[Yakun Yu|AUTHOR Yakun Yu]], [[Mohammad Salameh|AUTHOR Mohammad Salameh]], [[Di Niu|AUTHOR Di Niu]], [[Shangling Jui|AUTHOR Shangling Jui]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3177.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-2-10|PAPER Wed-1-2-10 — Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution]]</div>|<div class="cpsessionviewpapertitle">Small-Footprint Keyword Spotting with Multi-Scale Temporal Convolution</div><div class="cpsessionviewpaperauthor">[[Ximin Li|AUTHOR Ximin Li]], [[Xiaodong Wei|AUTHOR Xiaodong Wei]], [[Xiaowei Qin|AUTHOR Xiaowei Qin]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Zhengchen Zhang|
|^&nbsp;|^Tomoki Toda|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1018.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-1|PAPER Wed-1-3-1 — Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model]]</div>|<div class="cpsessionviewpapertitle">Using Cyclic Noise as the Source Signal for Neural Source-Filter-Based Speech Waveform Model</div><div class="cpsessionviewpaperauthor">[[Xin Wang|AUTHOR Xin Wang]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-2|PAPER Wed-1-3-2 — Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization]]</div>|<div class="cpsessionviewpapertitle">Unconditional Audio Generation with Generative Adversarial Networks and Cycle Regularization</div><div class="cpsessionviewpaperauthor">[[Jen-Yu Liu|AUTHOR Jen-Yu Liu]], [[Yu-Hua Chen|AUTHOR Yu-Hua Chen]], [[Yin-Cheng Yeh|AUTHOR Yin-Cheng Yeh]], [[Yi-Hsuan Yang|AUTHOR Yi-Hsuan Yang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1964.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-3|PAPER Wed-1-3-3 — Complex-Valued Variational Autoencoder: A Novel Deep Generative Model for Direct Representation of Complex Spectra]]</div>|<div class="cpsessionviewpapertitle">Complex-Valued Variational Autoencoder: A Novel Deep Generative Model for Direct Representation of Complex Spectra</div><div class="cpsessionviewpaperauthor">[[Toru Nakashika|AUTHOR Toru Nakashika]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2096.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-4|PAPER Wed-1-3-4 — Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding]]</div>|<div class="cpsessionviewpapertitle">Attentron: Few-Shot Text-to-Speech Utilizing Attention-Based Variable-Length Embedding</div><div class="cpsessionviewpaperauthor">[[Seungwoo Choi|AUTHOR Seungwoo Choi]], [[Seungju Han|AUTHOR Seungju Han]], [[Dongyoung Kim|AUTHOR Dongyoung Kim]], [[Sungjoo Ha|AUTHOR Sungjoo Ha]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2189.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-5|PAPER Wed-1-3-5 — Reformer-TTS: Neural Speech Synthesis with Reformer Network]]</div>|<div class="cpsessionviewpapertitle">Reformer-TTS: Neural Speech Synthesis with Reformer Network</div><div class="cpsessionviewpaperauthor">[[Hyeong Rae Ihm|AUTHOR Hyeong Rae Ihm]], [[Joun Yeop Lee|AUTHOR Joun Yeop Lee]], [[Byoung Jin Choi|AUTHOR Byoung Jin Choi]], [[Sung Jun Cheon|AUTHOR Sung Jun Cheon]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-6|PAPER Wed-1-3-6 — CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion]]</div>|<div class="cpsessionviewpapertitle">CycleGAN-VC3: Examining and Improving CycleGAN-VCs for Mel-Spectrogram Conversion</div><div class="cpsessionviewpaperauthor">[[Takuhiro Kaneko|AUTHOR Takuhiro Kaneko]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Kou Tanaka|AUTHOR Kou Tanaka]], [[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-7|PAPER Wed-1-3-7 — High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency]]</div>|<div class="cpsessionviewpapertitle">High Quality Streaming Speech Synthesis with Low, Sentence-Length-Independent Latency</div><div class="cpsessionviewpaperauthor">[[Nikolaos Ellinas|AUTHOR Nikolaos Ellinas]], [[Georgios Vamvoukakis|AUTHOR Georgios Vamvoukakis]], [[Konstantinos Markopoulos|AUTHOR Konstantinos Markopoulos]], [[Aimilios Chalamandaris|AUTHOR Aimilios Chalamandaris]], [[Georgia Maniati|AUTHOR Georgia Maniati]], [[Panos Kakoulidis|AUTHOR Panos Kakoulidis]], [[Spyros Raptis|AUTHOR Spyros Raptis]], [[June Sig Sung|AUTHOR June Sig Sung]], [[Hyoungmin Park|AUTHOR Hyoungmin Park]], [[Pirros Tsiakoulis|AUTHOR Pirros Tsiakoulis]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2968.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-8|PAPER Wed-1-3-8 — DurIAN: Duration Informed Attention Network for Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">DurIAN: Duration Informed Attention Network for Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Heng Lu|AUTHOR Heng Lu]], [[Na Hu|AUTHOR Na Hu]], [[Meng Yu|AUTHOR Meng Yu]], [[Chao Weng|AUTHOR Chao Weng]], [[Kun Xu|AUTHOR Kun Xu]], [[Peng Liu|AUTHOR Peng Liu]], [[Deyi Tuo|AUTHOR Deyi Tuo]], [[Shiyin Kang|AUTHOR Shiyin Kang]], [[Guangzhi Lei|AUTHOR Guangzhi Lei]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3167.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-9|PAPER Wed-1-3-9 — Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes]]</div>|<div class="cpsessionviewpapertitle">Multi-Speaker Text-to-Speech Synthesis Using Deep Gaussian Processes</div><div class="cpsessionviewpaperauthor">[[Kentaro Mitsui|AUTHOR Kentaro Mitsui]], [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-3-10|PAPER Wed-1-3-10 — A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages]]</div>|<div class="cpsessionviewpapertitle">A Hybrid HMM-Waveglow Based Text-to-Speech Synthesizer Using Histogram Equalization for Low Resource Indian Languages</div><div class="cpsessionviewpaperauthor">[[Mano Ranjith Kumar M.|AUTHOR Mano Ranjith Kumar M.]], [[Sudhanshu Srivastava|AUTHOR Sudhanshu Srivastava]], [[Anusha Prakash|AUTHOR Anusha Prakash]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Shinji Watanabe|
|^&nbsp;|^Niko Moritz|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-1|PAPER Wed-1-5-1 — 1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM]]</div>|<div class="cpsessionviewpapertitle">1-D Row-Convolution LSTM: Fast Streaming ASR at Accuracy Parity with LC-BLSTM</div><div class="cpsessionviewpaperauthor">[[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Chaojun Liu|AUTHOR Chaojun Liu]], [[Yifan Gong|AUTHOR Yifan Gong]], [[Jian Wu|AUTHOR Jian Wu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1292.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-2|PAPER Wed-1-5-2 — Low Latency End-to-End Streaming Speech Recognition with a Scout Network]]</div>|<div class="cpsessionviewpapertitle">Low Latency End-to-End Streaming Speech Recognition with a Scout Network</div><div class="cpsessionviewpaperauthor">[[Chengyi Wang|AUTHOR Chengyi Wang]], [[Yu Wu|AUTHOR Yu Wu]], [[Liang Lu|AUTHOR Liang Lu]], [[Shujie Liu|AUTHOR Shujie Liu]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Guoli Ye|AUTHOR Guoli Ye]], [[Ming Zhou|AUTHOR Ming Zhou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2442.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-3|PAPER Wed-1-5-3 — Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Knowledge Distillation from Offline to Streaming RNN Transducer for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Gakuto Kurata|AUTHOR Gakuto Kurata]], [[George Saon|AUTHOR George Saon]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2875.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-4|PAPER Wed-1-5-4 — Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Parallel Rescoring with Transformer for Streaming On-Device Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Wei Li|AUTHOR Wei Li]], [[James Qin|AUTHOR James Qin]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[Yanzhang He|AUTHOR Yanzhang He]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2770.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-5|PAPER Wed-1-5-5 — Improved Hybrid Streaming ASR with Transformer Language Models]]</div>|<div class="cpsessionviewpapertitle">Improved Hybrid Streaming ASR with Transformer Language Models</div><div class="cpsessionviewpaperauthor">[[Pau Baquero-Arnal|AUTHOR Pau Baquero-Arnal]], [[Javier Jorge|AUTHOR Javier Jorge]], [[Adrià Giménez|AUTHOR Adrià Giménez]], [[Joan Albert Silvestre-Cerdà|AUTHOR Joan Albert Silvestre-Cerdà]], [[Javier Iranzo-Sánchez|AUTHOR Javier Iranzo-Sánchez]], [[Albert Sanchis|AUTHOR Albert Sanchis]], [[Jorge Civera|AUTHOR Jorge Civera]], [[Alfons Juan|AUTHOR Alfons Juan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2079.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-6|PAPER Wed-1-5-6 — Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory]]</div>|<div class="cpsessionviewpapertitle">Streaming Transformer-Based Acoustic Models Using Self-Attention with Augmented Memory</div><div class="cpsessionviewpaperauthor">[[Chunyang Wu|AUTHOR Chunyang Wu]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Yangyang Shi|AUTHOR Yangyang Shi]], [[Ching-Feng Yeh|AUTHOR Ching-Feng Yeh]], [[Frank Zhang|AUTHOR Frank Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1780.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-7|PAPER Wed-1-5-7 — Enhancing Monotonic Multihead Attention for Streaming ASR]]</div>|<div class="cpsessionviewpapertitle">Enhancing Monotonic Multihead Attention for Streaming ASR</div><div class="cpsessionviewpaperauthor">[[Hirofumi Inaguma|AUTHOR Hirofumi Inaguma]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-8|PAPER Wed-1-5-8 — Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Streaming Chunk-Aware Multihead Attention for Online End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Zhifu Gao|AUTHOR Zhifu Gao]], [[Haoneng Luo|AUTHOR Haoneng Luo]], [[Ming Lei|AUTHOR Ming Lei]], [[Jie Gao|AUTHOR Jie Gao]], [[Zhijie Yan|AUTHOR Zhijie Yan]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1863.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-9|PAPER Wed-1-5-9 — High Performance Sequence-to-Sequence Model for Streaming Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">High Performance Sequence-to-Sequence Model for Streaming Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]], [[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]], [[Sebastian Stüker|AUTHOR Sebastian Stüker]], [[Alex Waibel|AUTHOR Alex Waibel]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2345.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-5-10|PAPER Wed-1-5-10 — Transfer Learning Approaches for Streaming End-to-End Speech Recognition System]]</div>|<div class="cpsessionviewpapertitle">Transfer Learning Approaches for Streaming End-to-End Speech Recognition System</div><div class="cpsessionviewpaperauthor">[[Vikas Joshi|AUTHOR Vikas Joshi]], [[Rui Zhao|AUTHOR Rui Zhao]], [[Rupesh R. Mehta|AUTHOR Rupesh R. Mehta]], [[Kshitiz Kumar|AUTHOR Kshitiz Kumar]], [[Jinyu Li|AUTHOR Jinyu Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Oldřich Plchot|
|^&nbsp;|^Berrak Sisman|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1132.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-1|PAPER Wed-1-7-1 — NEC-TT Speaker Verification System for SRE’19 CTS Challenge]]</div>|<div class="cpsessionviewpapertitle">NEC-TT Speaker Verification System for SRE’19 CTS Challenge</div><div class="cpsessionviewpaperauthor">[[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Koji Okabe|AUTHOR Koji Okabe]], [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]], [[Qiongqiong Wang|AUTHOR Qiongqiong Wang]], [[Ling Guo|AUTHOR Ling Guo]], [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]], [[Jiacen Zhang|AUTHOR Jiacen Zhang]], [[Keisuke Ishikawa|AUTHOR Keisuke Ishikawa]], [[Koichi Shinoda|AUTHOR Koichi Shinoda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-2|PAPER Wed-1-7-2 — THUEE System for NIST SRE19 CTS Challenge]]</div>|<div class="cpsessionviewpapertitle">THUEE System for NIST SRE19 CTS Challenge</div><div class="cpsessionviewpaperauthor">[[Ruyun Li|AUTHOR Ruyun Li]], [[Tianyu Liang|AUTHOR Tianyu Liang]], [[Dandan Song|AUTHOR Dandan Song]], [[Yi Liu|AUTHOR Yi Liu]], [[Yangcheng Wu|AUTHOR Yangcheng Wu]], [[Can Xu|AUTHOR Can Xu]], [[Peng Ouyang|AUTHOR Peng Ouyang]], [[Xianwei Zhang|AUTHOR Xianwei Zhang]], [[Xianhong Chen|AUTHOR Xianhong Chen]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]], [[Shouyi Yin|AUTHOR Shouyi Yin]], [[Liang He|AUTHOR Liang He]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1434.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-3|PAPER Wed-1-7-3 — Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">Automatic Quality Assessment for Audio-Visual Verification Systems. The //LOVe// Submission to NIST SRE Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Grigory Antipov|AUTHOR Grigory Antipov]], [[Nicolas Gengembre|AUTHOR Nicolas Gengembre]], [[Olivier Le Blouch|AUTHOR Olivier Le Blouch]], [[Gaël Le Lan|AUTHOR Gaël Le Lan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1814.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-4|PAPER Wed-1-7-4 — Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network]]</div>|<div class="cpsessionviewpapertitle">Audio-Visual Speaker Recognition with a Cross-Modal Discriminative Network</div><div class="cpsessionviewpaperauthor">[[Ruijie Tao|AUTHOR Ruijie Tao]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1996.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-5|PAPER Wed-1-7-5 — Multimodal Association for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Multimodal Association for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Suwon Shon|AUTHOR Suwon Shon]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2229.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-6|PAPER Wed-1-7-6 — Multi-Modality Matters: A Performance Leap on VoxCeleb]]</div>|<div class="cpsessionviewpapertitle">Multi-Modality Matters: A Performance Leap on VoxCeleb</div><div class="cpsessionviewpaperauthor">[[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2738.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-7|PAPER Wed-1-7-7 — Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Cross-Domain Adaptation with Discrepancy Minimization for Text-Independent Forensic Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Zhenyu Wang|AUTHOR Zhenyu Wang]], [[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2868.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-8|PAPER Wed-1-7-8 — Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias]]</div>|<div class="cpsessionviewpapertitle">Open-Set Short Utterance Forensic Speaker Verification Using Teacher-Student Network with Explicit Inductive Bias</div><div class="cpsessionviewpaperauthor">[[Mufan Sang|AUTHOR Mufan Sang]], [[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2972.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-9|PAPER Wed-1-7-9 — JukeBox: A Multilingual Singer Recognition Dataset]]</div>|<div class="cpsessionviewpapertitle">JukeBox: A Multilingual Singer Recognition Dataset</div><div class="cpsessionviewpaperauthor">[[Anurag Chowdhury|AUTHOR Anurag Chowdhury]], [[Austin Cozzo|AUTHOR Austin Cozzo]], [[Arun Ross|AUTHOR Arun Ross]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3025.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-7-10|PAPER Wed-1-7-10 — Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training]]</div>|<div class="cpsessionviewpapertitle">Speaker Identification for Household Scenarios with Self-Attention and Adversarial Training</div><div class="cpsessionviewpaperauthor">[[Ruirui Li|AUTHOR Ruirui Li]], [[Jyun-Yu Jiang|AUTHOR Jyun-Yu Jiang]], [[Xian Wu|AUTHOR Xian Wu]], [[Chu-Cheng Hsieh|AUTHOR Chu-Cheng Hsieh]], [[Andreas Stolcke|AUTHOR Andreas Stolcke]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Frank Seide|
|^&nbsp;|^Marc Delcroix|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-1|PAPER Wed-1-8-1 — Streaming Keyword Spotting on Mobile Devices]]</div>|<div class="cpsessionviewpapertitle">Streaming Keyword Spotting on Mobile Devices</div><div class="cpsessionviewpaperauthor">[[Oleg Rybakov|AUTHOR Oleg Rybakov]], [[Natasha Kononenko|AUTHOR Natasha Kononenko]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Mirkó Visontai|AUTHOR Mirkó Visontai]], [[Stella Laurenzo|AUTHOR Stella Laurenzo]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-2|PAPER Wed-1-8-2 — Metadata-Aware End-to-End Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Metadata-Aware End-to-End Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Hongyi Liu|AUTHOR Hongyi Liu]], [[Apurva Abhyankar|AUTHOR Apurva Abhyankar]], [[Yuriy Mishchenko|AUTHOR Yuriy Mishchenko]], [[Thibaud Sénéchal|AUTHOR Thibaud Sénéchal]], [[Gengshen Fu|AUTHOR Gengshen Fu]], [[Brian Kulis|AUTHOR Brian Kulis]], [[Noah D. Stein|AUTHOR Noah D. Stein]], [[Anish Shah|AUTHOR Anish Shah]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1294.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-3|PAPER Wed-1-8-3 — Adversarial Audio: A New Information Hiding Method]]</div>|<div class="cpsessionviewpapertitle">Adversarial Audio: A New Information Hiding Method</div><div class="cpsessionviewpaperauthor">[[Yehao Kong|AUTHOR Yehao Kong]], [[Jiliang Zhang|AUTHOR Jiliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1759.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-4|PAPER Wed-1-8-4 — S2IGAN: Speech-to-Image Generation via Adversarial Learning]]</div>|<div class="cpsessionviewpapertitle">S2IGAN: Speech-to-Image Generation via Adversarial Learning</div><div class="cpsessionviewpaperauthor">[[Xinsheng Wang|AUTHOR Xinsheng Wang]], [[Tingting Qiao|AUTHOR Tingting Qiao]], [[Jihua Zhu|AUTHOR Jihua Zhu]], [[Alan Hanjalic|AUTHOR Alan Hanjalic]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2173.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-5|PAPER Wed-1-8-5 — Automatic Speech Recognition Benchmark for Air-Traffic Communications]]</div>|<div class="cpsessionviewpapertitle">Automatic Speech Recognition Benchmark for Air-Traffic Communications</div><div class="cpsessionviewpaperauthor">[[Juan Zuluaga-Gomez|AUTHOR Juan Zuluaga-Gomez]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Qingran Zhan|AUTHOR Qingran Zhan]], [[Karel Veselý|AUTHOR Karel Veselý]], [[Rudolf Braun|AUTHOR Rudolf Braun]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2639.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-6|PAPER Wed-1-8-6 — Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach]]</div>|<div class="cpsessionviewpapertitle">Whisper Augmented End-to-End/Hybrid Speech Recognition System — CycleGAN Approach</div><div class="cpsessionviewpaperauthor">[[Prithvi R.R. Gudepu|AUTHOR Prithvi R.R. Gudepu]], [[Gowtham P. Vadisetti|AUTHOR Gowtham P. Vadisetti]], [[Abhishek Niranjan|AUTHOR Abhishek Niranjan]], [[Kinnera Saranu|AUTHOR Kinnera Saranu]], [[Raghava Sarma|AUTHOR Raghava Sarma]], [[M. Ali Basha Shaik|AUTHOR M. Ali Basha Shaik]], [[Periyasamy Paramasivam|AUTHOR Periyasamy Paramasivam]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2649.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-7|PAPER Wed-1-8-7 — Risk Forecasting from Earnings Calls Acoustics and Network Correlations]]</div>|<div class="cpsessionviewpapertitle">Risk Forecasting from Earnings Calls Acoustics and Network Correlations</div><div class="cpsessionviewpaperauthor">[[Ramit Sawhney|AUTHOR Ramit Sawhney]], [[Arshiya Aggarwal|AUTHOR Arshiya Aggarwal]], [[Piyush Khanna|AUTHOR Piyush Khanna]], [[Puneet Mathur|AUTHOR Puneet Mathur]], [[Taru Jain|AUTHOR Taru Jain]], [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2787.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-8|PAPER Wed-1-8-8 — SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems]]</div>|<div class="cpsessionviewpapertitle">SpecMark: A Spectral Watermarking Framework for IP Protection of Speech Recognition Systems</div><div class="cpsessionviewpaperauthor">[[Huili Chen|AUTHOR Huili Chen]], [[Bita Darvish|AUTHOR Bita Darvish]], [[Farinaz Koushanfar|AUTHOR Farinaz Koushanfar]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2870.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-8-9|PAPER Wed-1-8-9 — Evaluating Automatically Generated Phoneme Captions for Images]]</div>|<div class="cpsessionviewpapertitle">Evaluating Automatically Generated Phoneme Captions for Images</div><div class="cpsessionviewpaperauthor">[[Justin van der Hout|AUTHOR Justin van der Hout]], [[Zoltán D’Haese|AUTHOR Zoltán D’Haese]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Shahin Amiriparian|
|^&nbsp;|^Bin Liu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2636.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-1|PAPER Wed-1-9-1 — An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks]]</div>|<div class="cpsessionviewpapertitle">An Efficient Temporal Modeling Approach for Speech Emotion Recognition by Mapping Varied Duration Sentences into Fixed Number of Chunks</div><div class="cpsessionviewpaperauthor">[[Wei-Cheng Lin|AUTHOR Wei-Cheng Lin]], [[Carlos Busso|AUTHOR Carlos Busso]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3190.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-2|PAPER Wed-1-9-2 — Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Deep Architecture Enhancing Robustness to Noise, Adversarial Attacks, and Cross-Corpus Setting for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Siddique Latif|AUTHOR Siddique Latif]], [[Rajib Rana|AUTHOR Rajib Rana]], [[Sara Khalifa|AUTHOR Sara Khalifa]], [[Raja Jurdak|AUTHOR Raja Jurdak]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1082.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-3|PAPER Wed-1-9-3 — Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels]]</div>|<div class="cpsessionviewpapertitle">Meta-Learning for Speech Emotion Recognition Considering Ambiguity of Emotion Labels</div><div class="cpsessionviewpaperauthor">[[Takuya Fujioka|AUTHOR Takuya Fujioka]], [[Takeshi Homma|AUTHOR Takeshi Homma]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1520.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-4|PAPER Wed-1-9-4 — Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation]]</div>|<div class="cpsessionviewpapertitle">Temporal Attention Convolutional Network for Speech Emotion Recognition with Latent Representation</div><div class="cpsessionviewpaperauthor">[[Jiaxing Liu|AUTHOR Jiaxing Liu]], [[Zhilei Liu|AUTHOR Zhilei Liu]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Yuan Gao|AUTHOR Yuan Gao]], [[Lili Guo|AUTHOR Lili Guo]], [[Jianwu Dang|AUTHOR Jianwu Dang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1618.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-5|PAPER Wed-1-9-5 — Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator]]</div>|<div class="cpsessionviewpapertitle">Reconciliation of Multiple Corpora for Speech Emotion Recognition by Multiple Classifiers with an Adversarial Corpus Discriminator</div><div class="cpsessionviewpaperauthor">[[Zhi Zhu|AUTHOR Zhi Zhu]], [[Yoshinao Sato|AUTHOR Yoshinao Sato]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1703.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-6|PAPER Wed-1-9-6 — Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks</div><div class="cpsessionviewpaperauthor">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]], [[Zhanlei Yang|AUTHOR Zhanlei Yang]], [[Rongjun Li|AUTHOR Rongjun Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1762.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-7|PAPER Wed-1-9-7 — EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification]]</div>|<div class="cpsessionviewpapertitle">EigenEmo: Spectral Utterance Representation Using Dynamic Mode Decomposition for Speech Emotion Classification</div><div class="cpsessionviewpaperauthor">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1779.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-1-9-8|PAPER Wed-1-9-8 — Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Advancing Multiple Instance Learning with Attention Modeling for Categorical Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[C.-C. Jay Kuo|AUTHOR C.-C. Jay Kuo]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Volker Hohmann|
|^&nbsp;|^Deniz Başkent|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2836.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-1|PAPER Wed-2-1-1 — The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement]]</div>|<div class="cpsessionviewpapertitle">The Implication of Sound Level on Spatial Selective Auditory Attention for Cochlear Implant Users: Behavioral and Electrophysiological Measurement</div><div class="cpsessionviewpaperauthor">[[Sara Akbarzadeh|AUTHOR Sara Akbarzadeh]], [[Sungmin Lee|AUTHOR Sungmin Lee]], [[Chin-Tuan Tan|AUTHOR Chin-Tuan Tan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2507.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-2|PAPER Wed-2-1-2 — Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder]]</div>|<div class="cpsessionviewpapertitle">Enhancing the Interaural Time Difference of Bilateral Cochlear Implants with the Temporal Limits Encoder</div><div class="cpsessionviewpaperauthor">[[Yangyang Wan|AUTHOR Yangyang Wan]], [[Huali Zhou|AUTHOR Huali Zhou]], [[Qinglin Meng|AUTHOR Qinglin Meng]], [[Nengheng Zheng|AUTHOR Nengheng Zheng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1081.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-3|PAPER Wed-2-1-3 — Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index]]</div>|<div class="cpsessionviewpapertitle">Speech Clarity Improvement by Vocal Self-Training Using a Hearing Impairment Simulator and its Correlation with an Auditory Modulation Index</div><div class="cpsessionviewpaperauthor">[[Toshio Irino|AUTHOR Toshio Irino]], [[Soichi Higashiyama|AUTHOR Soichi Higashiyama]], [[Hanako Yoshigi|AUTHOR Hanako Yoshigi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1481.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-4|PAPER Wed-2-1-4 — Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners]]</div>|<div class="cpsessionviewpapertitle">Investigation of Phase Distortion on Perceived Speech Quality for Hearing-Impaired Listeners</div><div class="cpsessionviewpaperauthor">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]], [[Donald S. Williamson|AUTHOR Donald S. Williamson]], [[Yi Shen|AUTHOR Yi Shen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2013.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-5|PAPER Wed-2-1-5 — EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning]]</div>|<div class="cpsessionviewpapertitle">EEG-Based Short-Time Auditory Attention Detection Using Multi-Task Deep Learning</div><div class="cpsessionviewpaperauthor">[[Zhuo Zhang|AUTHOR Zhuo Zhang]], [[Gaoyan Zhang|AUTHOR Gaoyan Zhang]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Shuang Wu|AUTHOR Shuang Wu]], [[Di Zhou|AUTHOR Di Zhou]], [[Longbiao Wang|AUTHOR Longbiao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2239.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-6|PAPER Wed-2-1-6 — Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification]]</div>|<div class="cpsessionviewpapertitle">Towards Interpreting Deep Learning Models to Understand Loss of Speech Intelligibility in Speech Disorders — Step 1: CNN Model-Based Phone Classification</div><div class="cpsessionviewpaperauthor">[[Sondes Abderrazek|AUTHOR Sondes Abderrazek]], [[Corinne Fredouille|AUTHOR Corinne Fredouille]], [[Alain Ghio|AUTHOR Alain Ghio]], [[Muriel Lalain|AUTHOR Muriel Lalain]], [[Christine Meunier|AUTHOR Christine Meunier]], [[Virginie Woisard|AUTHOR Virginie Woisard]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2433.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-7|PAPER Wed-2-1-7 — Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation]]</div>|<div class="cpsessionviewpapertitle">Improving Cognitive Impairment Classification by Generative Neural Network-Based Feature Augmentation</div><div class="cpsessionviewpaperauthor">[[Bahman Mirheidari|AUTHOR Bahman Mirheidari]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Ronan O’Malley|AUTHOR Ronan O’Malley]], [[Annalena Venneri|AUTHOR Annalena Venneri]], [[Traci Walker|AUTHOR Traci Walker]], [[Markus Reuber|AUTHOR Markus Reuber]], [[Heidi Christensen|AUTHOR Heidi Christensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3093.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-8|PAPER Wed-2-1-8 — UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech]]</div>|<div class="cpsessionviewpapertitle">UncommonVoice: A Crowdsourced Dataset of Dysphonic Speech</div><div class="cpsessionviewpaperauthor">[[Meredith Moore|AUTHOR Meredith Moore]], [[Piyush Papreja|AUTHOR Piyush Papreja]], [[Michael Saxon|AUTHOR Michael Saxon]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Sethuraman Panchanathan|AUTHOR Sethuraman Panchanathan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-9|PAPER Wed-2-1-9 — Towards Automatic Assessment of Voice Disorders: A Clinical Approach]]</div>|<div class="cpsessionviewpapertitle">Towards Automatic Assessment of Voice Disorders: A Clinical Approach</div><div class="cpsessionviewpaperauthor">[[Purva Barche|AUTHOR Purva Barche]], [[Krishna Gurugubelli|AUTHOR Krishna Gurugubelli]], [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2880.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-1-10|PAPER Wed-2-1-10 — BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages]]</div>|<div class="cpsessionviewpapertitle">BlaBla: Linguistic Feature Extraction for Clinical Analysis in Multiple Languages</div><div class="cpsessionviewpaperauthor">[[Abhishek Shivkumar|AUTHOR Abhishek Shivkumar]], [[Jack Weston|AUTHOR Jack Weston]], [[Raphael Lenain|AUTHOR Raphael Lenain]], [[Emil Fristed|AUTHOR Emil Fristed]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Kwanchiva Thangthai|
|^&nbsp;|^Shuichi Sakamoto|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1139.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-1|PAPER Wed-2-10-1 — Transfer Learning of Articulatory Information Through Phone Information]]</div>|<div class="cpsessionviewpapertitle">Transfer Learning of Articulatory Information Through Phone Information</div><div class="cpsessionviewpaperauthor">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]], [[Negar Olfati|AUTHOR Negar Olfati]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Giampiero Salvi|AUTHOR Giampiero Salvi]], [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1140.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-2|PAPER Wed-2-10-2 — Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals]]</div>|<div class="cpsessionviewpapertitle">Sequence-to-Sequence Articulatory Inversion Through Time Convolution of Sub-Band Frequency Signals</div><div class="cpsessionviewpaperauthor">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Giampiero Salvi|AUTHOR Giampiero Salvi]], [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2134.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-3|PAPER Wed-2-10-3 — Discriminative Singular Spectrum Analysis for Bioacoustic Classification]]</div>|<div class="cpsessionviewpapertitle">Discriminative Singular Spectrum Analysis for Bioacoustic Classification</div><div class="cpsessionviewpaperauthor">[[Bernardo B. Gatto|AUTHOR Bernardo B. Gatto]], [[Eulanda M. dos Santos|AUTHOR Eulanda M. dos Santos]], [[Juan G. Colonna|AUTHOR Juan G. Colonna]], [[Naoya Sogi|AUTHOR Naoya Sogi]], [[Lincon S. Souza|AUTHOR Lincon S. Souza]], [[Kazuhiro Fukui|AUTHOR Kazuhiro Fukui]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2259.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-4|PAPER Wed-2-10-4 — Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data]]</div>|<div class="cpsessionviewpapertitle">Speech Rate Task-Specific Representation Learning from Acoustic-Articulatory Data</div><div class="cpsessionviewpaperauthor">[[Renuka Mannem|AUTHOR Renuka Mannem]], [[Hima Jyothi R.|AUTHOR Hima Jyothi R.]], [[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-5|PAPER Wed-2-10-5 — Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics]]</div>|<div class="cpsessionviewpapertitle">Dysarthria Detection and Severity Assessment Using Rhythm-Based Metrics</div><div class="cpsessionviewpaperauthor">[[Abner Hernandez|AUTHOR Abner Hernandez]], [[Eun Jung Yeo|AUTHOR Eun Jung Yeo]], [[Sunhee Kim|AUTHOR Sunhee Kim]], [[Minhwa Chung|AUTHOR Minhwa Chung]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2487.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-6|PAPER Wed-2-10-6 — LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">LungRN+NL: An Improved Adventitious Lung Sound Classification Using Non-Local Block ResNet Neural Network with Mixup Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Yi Ma|AUTHOR Yi Ma]], [[Xinzi Xu|AUTHOR Xinzi Xu]], [[Yongfu Li|AUTHOR Yongfu Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2708.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-7|PAPER Wed-2-10-7 — Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates]]</div>|<div class="cpsessionviewpapertitle">Attention and Encoder-Decoder Based Models for Transforming Articulatory Movements at Different Speaking Rates</div><div class="cpsessionviewpaperauthor">[[Abhayjeet Singh|AUTHOR Abhayjeet Singh]], [[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2790.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-8|PAPER Wed-2-10-8 — Adventitious Respiratory Classification Using Attentive Residual Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Adventitious Respiratory Classification Using Attentive Residual Neural Networks</div><div class="cpsessionviewpaperauthor">[[Zijiang Yang|AUTHOR Zijiang Yang]], [[Shuo Liu|AUTHOR Shuo Liu]], [[Meishu Song|AUTHOR Meishu Song]], [[Emilia Parada-Cabaleiro|AUTHOR Emilia Parada-Cabaleiro]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2879.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-9|PAPER Wed-2-10-9 — Surfboard: Audio Feature Extraction for Modern Machine Learning]]</div>|<div class="cpsessionviewpapertitle">Surfboard: Audio Feature Extraction for Modern Machine Learning</div><div class="cpsessionviewpaperauthor">[[Raphael Lenain|AUTHOR Raphael Lenain]], [[Jack Weston|AUTHOR Jack Weston]], [[Abhishek Shivkumar|AUTHOR Abhishek Shivkumar]], [[Emil Fristed|AUTHOR Emil Fristed]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3217.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-10-10|PAPER Wed-2-10-10 — Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task]]</div>|<div class="cpsessionviewpapertitle">Whisper Activity Detection Using CNN-LSTM Based Attention Pooling Network Trained for a Speaker Identification Task</div><div class="cpsessionviewpaperauthor">[[Abinay Reddy Naini|AUTHOR Abinay Reddy Naini]], [[Malla Satyapriya|AUTHOR Malla Satyapriya]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Chao Zhang|
|^&nbsp;|^Shifeng Pan|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1163.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-1|PAPER Wed-2-11-1 — Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Towards Natural Bilingual and Code-Switched Speech Synthesis Based on Mix of Monolingual Recordings and Cross-Lingual Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Trung Hieu Nguyen|AUTHOR Trung Hieu Nguyen]], [[Hao Wang|AUTHOR Hao Wang]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1464.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-2|PAPER Wed-2-11-2 — Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment]]</div>|<div class="cpsessionviewpapertitle">Multi-Lingual Multi-Speaker Text-to-Speech Synthesis for Voice Cloning with Online Speaker Enrollment</div><div class="cpsessionviewpaperauthor">[[Zhaoyu Liu|AUTHOR Zhaoyu Liu]], [[Brian Mak|AUTHOR Brian Mak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1754.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-3|PAPER Wed-2-11-3 — Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Ruibo Fu|AUTHOR Ruibo Fu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Chunyu Qiang|AUTHOR Chunyu Qiang]], [[Tao Wang|AUTHOR Tao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-4|PAPER Wed-2-11-4 — Phonological Features for 0-Shot Multilingual Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Phonological Features for 0-Shot Multilingual Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Marlene Staib|AUTHOR Marlene Staib]], [[Tian Huey Teh|AUTHOR Tian Huey Teh]], [[Alexandra Torresquintero|AUTHOR Alexandra Torresquintero]], [[Devang S. Ram Mohan|AUTHOR Devang S. Ram Mohan]], [[Lorenzo Foglianti|AUTHOR Lorenzo Foglianti]], [[Raphael Lenain|AUTHOR Raphael Lenain]], [[Jiameng Gao|AUTHOR Jiameng Gao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2070.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-5|PAPER Wed-2-11-5 — Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space]]</div>|<div class="cpsessionviewpapertitle">Cross-Lingual Text-To-Speech Synthesis via Domain Adaptation and Perceptual Similarity Regression in Speaker Space</div><div class="cpsessionviewpaperauthor">[[Detai Xin|AUTHOR Detai Xin]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2180.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-6|PAPER Wed-2-11-6 — Tone Learning in Low-Resource Bilingual TTS]]</div>|<div class="cpsessionviewpapertitle">Tone Learning in Low-Resource Bilingual TTS</div><div class="cpsessionviewpaperauthor">[[Ruolan Liu|AUTHOR Ruolan Liu]], [[Xue Wen|AUTHOR Xue Wen]], [[Chunhui Lu|AUTHOR Chunhui Lu]], [[Xiao Chen|AUTHOR Xiao Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2654.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-7|PAPER Wed-2-11-7 — On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model]]</div>|<div class="cpsessionviewpapertitle">On Improving Code Mixed Speech Synthesis with Mixlingual Grapheme-to-Phoneme Model</div><div class="cpsessionviewpaperauthor">[[Shubham Bansal|AUTHOR Shubham Bansal]], [[Arijit Mukherjee|AUTHOR Arijit Mukherjee]], [[Sandeepkumar Satpal|AUTHOR Sandeepkumar Satpal]], [[Rupeshkumar Mehta|AUTHOR Rupeshkumar Mehta]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2663.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-8|PAPER Wed-2-11-8 — Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework]]</div>|<div class="cpsessionviewpapertitle">Generic Indic Text-to-Speech Synthesisers with Rapid Adaptation in an End-to-End Framework</div><div class="cpsessionviewpaperauthor">[[Anusha Prakash|AUTHOR Anusha Prakash]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2664.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-9|PAPER Wed-2-11-9 — Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling]]</div>|<div class="cpsessionviewpapertitle">Efficient Neural Speech Synthesis for Low-Resource Languages Through Multilingual Modeling</div><div class="cpsessionviewpaperauthor">[[Marcel de Korte|AUTHOR Marcel de Korte]], [[Jaebok Kim|AUTHOR Jaebok Kim]], [[Esther Klabbers|AUTHOR Esther Klabbers]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2679.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-11-10|PAPER Wed-2-11-10 — One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech]]</div>|<div class="cpsessionviewpapertitle">One Model, Many Languages: Meta-Learning for Multilingual Text-to-Speech</div><div class="cpsessionviewpaperauthor">[[Tomáš Nekvinda|AUTHOR Tomáš Nekvinda]], [[Ondřej Dušek|AUTHOR Ondřej Dušek]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 12|<|
|^Chairs:&nbsp;|^Qingyang Hong|
|^&nbsp;|^Md Jahangir Alam|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-1|PAPER Wed-2-12-1 — In Defence of Metric Learning for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">In Defence of Metric Learning for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Joon Son Chung|AUTHOR Joon Son Chung]], [[Jaesung Huh|AUTHOR Jaesung Huh]], [[Seongkyu Mun|AUTHOR Seongkyu Mun]], [[Minjae Lee|AUTHOR Minjae Lee]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Soyeon Choe|AUTHOR Soyeon Choe]], [[Chiheon Ham|AUTHOR Chiheon Ham]], [[Sunghwan Jung|AUTHOR Sunghwan Jung]], [[Bong-Jin Lee|AUTHOR Bong-Jin Lee]], [[Icksang Han|AUTHOR Icksang Han]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1283.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-2|PAPER Wed-2-12-2 — Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs]]</div>|<div class="cpsessionviewpapertitle">Meta-Learning for Short Utterance Speaker Recognition with Imbalance Length Pairs</div><div class="cpsessionviewpaperauthor">[[Seong Min Kye|AUTHOR Seong Min Kye]], [[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Hae Beom Lee|AUTHOR Hae Beom Lee]], [[Sung Ju Hwang|AUTHOR Sung Ju Hwang]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1700.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-3|PAPER Wed-2-12-3 — Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Segment-Level Effects of Gender, Nationality and Emotion Information on Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Kai Li|AUTHOR Kai Li]], [[Masato Akagi|AUTHOR Masato Akagi]], [[Yibo Wu|AUTHOR Yibo Wu]], [[Jianwu Dang|AUTHOR Jianwu Dang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1774.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-4|PAPER Wed-2-12-4 — Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification]]</div>|<div class="cpsessionviewpapertitle">Weakly Supervised Training of Hierarchical Attention Networks for Speaker Identification</div><div class="cpsessionviewpaperauthor">[[Yanpei Shi|AUTHOR Yanpei Shi]], [[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-5|PAPER Wed-2-12-5 — Multi-Task Learning for Voice Related Recognition Tasks]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Learning for Voice Related Recognition Tasks</div><div class="cpsessionviewpaperauthor">[[Ana Montalvo|AUTHOR Ana Montalvo]], [[Jose R. Calvo|AUTHOR Jose R. Calvo]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1882.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-6|PAPER Wed-2-12-6 — Unsupervised Training of Siamese Networks for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Training of Siamese Networks for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Umair Khan|AUTHOR Umair Khan]], [[Javier Hernando|AUTHOR Javier Hernando]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1922.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-7|PAPER Wed-2-12-7 — An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions]]</div>|<div class="cpsessionviewpapertitle">An Effective Speaker Recognition Method Based on Joint Identification and Verification Supervisions</div><div class="cpsessionviewpaperauthor">[[Ying Liu|AUTHOR Ying Liu]], [[Yan Song|AUTHOR Yan Song]], [[Yiheng Jiang|AUTHOR Yiheng Jiang]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Lin Liu|AUTHOR Lin Liu]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2061.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-8|PAPER Wed-2-12-8 — Speaker-Aware Linear Discriminant Analysis in Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Speaker-Aware Linear Discriminant Analysis in Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Naijun Zheng|AUTHOR Naijun Zheng]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Jinghua Zhong|AUTHOR Jinghua Zhong]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2226.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-12-9|PAPER Wed-2-12-9 — Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network]]</div>|<div class="cpsessionviewpapertitle">Adversarial Domain Adaptation for Speaker Verification Using Partially Shared Network</div><div class="cpsessionviewpaperauthor">[[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Emre Yılmaz|
|^&nbsp;|^Tom Ko|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1045.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-1|PAPER Wed-2-2-1 — Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Depthwise Separable Convolutional ResNet with Squeeze-and-Excitation Blocks for Small-Footprint Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Menglong Xu|AUTHOR Menglong Xu]], [[Xiao-Lei Zhang|AUTHOR Xiao-Lei Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1186.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-2|PAPER Wed-2-2-2 — Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Predicting Detection Filters for Small Footprint Open-Vocabulary Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Théodore Bluche|AUTHOR Théodore Bluche]], [[Thibault Gisselbrecht|AUTHOR Thibault Gisselbrecht]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-3|PAPER Wed-2-2-3 — Deep Convolutional Spiking Neural Networks for Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Deep Convolutional Spiking Neural Networks for Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Özgür Bora Gevrek|AUTHOR Özgür Bora Gevrek]], [[Jibin Wu|AUTHOR Jibin Wu]], [[Yuxiang Chen|AUTHOR Yuxiang Chen]], [[Xuanbo Meng|AUTHOR Xuanbo Meng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1412.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-4|PAPER Wed-2-2-4 — Domain Aware Training for Far-Field Small-Footprint Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Domain Aware Training for Far-Field Small-Footprint Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Haiwei Wu|AUTHOR Haiwei Wu]], [[Yan Jia|AUTHOR Yan Jia]], [[Yuanfei Nie|AUTHOR Yuanfei Nie]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1644.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-5|PAPER Wed-2-2-5 — Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Re-Weighted Interval Loss for Handling Data Imbalance Problem of End-to-End Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Kun Zhang|AUTHOR Kun Zhang]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Daode Yuan|AUTHOR Daode Yuan]], [[Jian Luan|AUTHOR Jian Luan]], [[Jia Jia|AUTHOR Jia Jia]], [[Helen Meng|AUTHOR Helen Meng]], [[Binheng Song|AUTHOR Binheng Song]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1761.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-6|PAPER Wed-2-2-6 — Deep Template Matching for Small-Footprint and Configurable Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Deep Template Matching for Small-Footprint and Configurable Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Peng Zhang|AUTHOR Peng Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2185.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-7|PAPER Wed-2-2-7 — Multi-Scale Convolution for Robust Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">Multi-Scale Convolution for Robust Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Chen Yang|AUTHOR Chen Yang]], [[Xue Wen|AUTHOR Xue Wen]], [[Liming Song|AUTHOR Liming Song]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2568.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-8|PAPER Wed-2-2-8 — An Investigation of Few-Shot Learning in Spoken Term Classification]]</div>|<div class="cpsessionviewpapertitle">An Investigation of Few-Shot Learning in Spoken Term Classification</div><div class="cpsessionviewpaperauthor">[[Yangbin Chen|AUTHOR Yangbin Chen]], [[Tom Ko|AUTHOR Tom Ko]], [[Lifeng Shang|AUTHOR Lifeng Shang]], [[Xiao Chen|AUTHOR Xiao Chen]], [[Xin Jiang|AUTHOR Xin Jiang]], [[Qing Li|AUTHOR Qing Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2613.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-9|PAPER Wed-2-2-9 — End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages]]</div>|<div class="cpsessionviewpapertitle">End-to-End Keyword Search Based on Attention and Energy Scorer for Low Resource Languages</div><div class="cpsessionviewpaperauthor">[[Zeyu Zhao|AUTHOR Zeyu Zhao]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2763.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-2-10|PAPER Wed-2-2-10 — Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection]]</div>|<div class="cpsessionviewpapertitle">Stacked 1D Convolutional Networks for End-to-End Small Footprint Voice Trigger Detection</div><div class="cpsessionviewpaperauthor">[[Takuya Higuchi|AUTHOR Takuya Higuchi]], [[Mohammad Ghasemzadeh|AUTHOR Mohammad Ghasemzadeh]], [[Kisun You|AUTHOR Kisun You]], [[Chandra Dhir|AUTHOR Chandra Dhir]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Takahiro Shinozaki|
|^&nbsp;|^Emmanuel Vincent|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0034.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-1|PAPER Wed-2-4-1 — Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss]]</div>|<div class="cpsessionviewpapertitle">Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss</div><div class="cpsessionviewpaperauthor">[[Yi Luo|AUTHOR Yi Luo]], [[Nima Mesgarani|AUTHOR Nima Mesgarani]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1150.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-2|PAPER Wed-2-4-2 — On Synthesis for Supervised Monaural Speech Separation in Time Domain]]</div>|<div class="cpsessionviewpapertitle">On Synthesis for Supervised Monaural Speech Separation in Time Domain</div><div class="cpsessionviewpaperauthor">[[Jingjing Chen|AUTHOR Jingjing Chen]], [[Qirong Mao|AUTHOR Qirong Mao]], [[Dong Liu|AUTHOR Dong Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1545.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-3|PAPER Wed-2-4-3 — Learning Better Speech Representations by Worsening Interference]]</div>|<div class="cpsessionviewpapertitle">Learning Better Speech Representations by Worsening Interference</div><div class="cpsessionviewpaperauthor">[[Jun Wang|AUTHOR Jun Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1673.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-4|PAPER Wed-2-4-4 — Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers]]</div>|<div class="cpsessionviewpapertitle">Asteroid: The PyTorch-Based Audio Source Separation Toolkit for Researchers</div><div class="cpsessionviewpaperauthor">[[Manuel Pariente|AUTHOR Manuel Pariente]], [[Samuele Cornell|AUTHOR Samuele Cornell]], [[Joris Cosentino|AUTHOR Joris Cosentino]], [[Sunit Sivasankaran|AUTHOR Sunit Sivasankaran]], [[Efthymios Tzinis|AUTHOR Efthymios Tzinis]], [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]], [[Michel Olvera|AUTHOR Michel Olvera]], [[Fabian-Robert Stöter|AUTHOR Fabian-Robert Stöter]], [[Mathieu Hu|AUTHOR Mathieu Hu]], [[Juan M. Martín-Doñas|AUTHOR Juan M. Martín-Doñas]], [[David Ditter|AUTHOR David Ditter]], [[Ariel Frank|AUTHOR Ariel Frank]], [[Antoine Deleforge|AUTHOR Antoine Deleforge]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2205.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-6|PAPER Wed-2-4-6 — Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Dual-Path Transformer Network: Direct Context-Aware Modeling for End-to-End Monaural Speech Separation</div><div class="cpsessionviewpaperauthor">[[Jingjing Chen|AUTHOR Jingjing Chen]], [[Qirong Mao|AUTHOR Qirong Mao]], [[Dong Liu|AUTHOR Dong Liu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2371.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-7|PAPER Wed-2-4-7 — Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet]]</div>|<div class="cpsessionviewpapertitle">Conv-TasSAN: Separative Adversarial Network Based on Conv-TasNet</div><div class="cpsessionviewpaperauthor">[[Chengyun Deng|AUTHOR Chengyun Deng]], [[Yi Zhang|AUTHOR Yi Zhang]], [[Shiqian Ma|AUTHOR Shiqian Ma]], [[Yongtao Sha|AUTHOR Yongtao Sha]], [[Hui Song|AUTHOR Hui Song]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2388.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-8|PAPER Wed-2-4-8 — Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation]]</div>|<div class="cpsessionviewpapertitle">Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation</div><div class="cpsessionviewpaperauthor">[[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Thilo von Neumann|AUTHOR Thilo von Neumann]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3115.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-4-9|PAPER Wed-2-4-9 — Unsupervised Audio Source Separation Using Generative Priors]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Audio Source Separation Using Generative Priors</div><div class="cpsessionviewpaperauthor">[[Vivek Narayanaswamy|AUTHOR Vivek Narayanaswamy]], [[Jayaraman J. Thiagarajan|AUTHOR Jayaraman J. Thiagarajan]], [[Rushil Anirudh|AUTHOR Rushil Anirudh]], [[Andreas Spanias|AUTHOR Andreas Spanias]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Keisuke Kinoshita|
|^&nbsp;|^Junfeng Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1593.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-1|PAPER Wed-2-5-1 — Adversarial Latent Representation Learning for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Adversarial Latent Representation Learning for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yuanhang Qiu|AUTHOR Yuanhang Qiu]], [[Ruili Wang|AUTHOR Ruili Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1047.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-2|PAPER Wed-2-5-2 — An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence]]</div>|<div class="cpsessionviewpapertitle">An NMF-HMM Speech Enhancement Method Based on Kullback-Leibler Divergence</div><div class="cpsessionviewpaperauthor">[[Yang Xiang|AUTHOR Yang Xiang]], [[Liming Shi|AUTHOR Liming Shi]], [[Jesper Lisby Højvang|AUTHOR Jesper Lisby Højvang]], [[Morten Højfeldt Rasmussen|AUTHOR Morten Højfeldt Rasmussen]], [[Mads Græsbøll Christensen|AUTHOR Mads Græsbøll Christensen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1104.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-3|PAPER Wed-2-5-3 — Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Multi-Scale TCN: Exploring Better Temporal DNN Model for Causal Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Lu Zhang|AUTHOR Lu Zhang]], [[Mingjiang Wang|AUTHOR Mingjiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1193.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-4|PAPER Wed-2-5-4 — VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">VoiceFilter-Lite: Streaming Targeted Voice Separation for On-Device Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Quan Wang|AUTHOR Quan Wang]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Mert Saglam|AUTHOR Mert Saglam]], [[Kevin Wilson|AUTHOR Kevin Wilson]], [[Alan Chiao|AUTHOR Alan Chiao]], [[Renjie Liu|AUTHOR Renjie Liu]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Wei Li|AUTHOR Wei Li]], [[Jason Pelecanos|AUTHOR Jason Pelecanos]], [[Marily Nika|AUTHOR Marily Nika]], [[Alexander Gruenstein|AUTHOR Alexander Gruenstein]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-5|PAPER Wed-2-5-5 — Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss]]</div>|<div class="cpsessionviewpapertitle">Speech Separation Based on Multi-Stage Elaborated Dual-Path Deep BiLSTM with Auxiliary Identity Loss</div><div class="cpsessionviewpaperauthor">[[Ziqiang Shi|AUTHOR Ziqiang Shi]], [[Rujie Liu|AUTHOR Rujie Liu]], [[Jiqing Han|AUTHOR Jiqing Han]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1539.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-6|PAPER Wed-2-5-6 — Sub-Band Knowledge Distillation Framework for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Sub-Band Knowledge Distillation Framework for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Xiang Hao|AUTHOR Xiang Hao]], [[Shixue Wen|AUTHOR Shixue Wen]], [[Xiangdong Su|AUTHOR Xiangdong Su]], [[Yun Liu|AUTHOR Yun Liu]], [[Guanglai Gao|AUTHOR Guanglai Gao]], [[Xiaofei Li|AUTHOR Xiaofei Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1551.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-7|PAPER Wed-2-5-7 — A Deep Learning-Based Kalman Filter for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Deep Learning-Based Kalman Filter for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Sujan Kumar Roy|AUTHOR Sujan Kumar Roy]], [[Aaron Nicolson|AUTHOR Aaron Nicolson]], [[Kuldip K. Paliwal|AUTHOR Kuldip K. Paliwal]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1913.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-8|PAPER Wed-2-5-8 — Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Subband Kalman Filtering with DNN Estimated Parameters for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Hongjiang Yu|AUTHOR Hongjiang Yu]], [[Wei-Ping Zhu|AUTHOR Wei-Ping Zhu]], [[Benoit Champagne|AUTHOR Benoit Champagne]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2245.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-9|PAPER Wed-2-5-9 — Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Bidirectional LSTM Network with Ordered Neurons for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Xiaoqi Li|AUTHOR Xiaoqi Li]], [[Yaxing Li|AUTHOR Yaxing Li]], [[Yuanjie Dong|AUTHOR Yuanjie Dong]], [[Shan Xu|AUTHOR Shan Xu]], [[Zhihui Zhang|AUTHOR Zhihui Zhang]], [[Dan Wang|AUTHOR Dan Wang]], [[Shengwu Xiong|AUTHOR Shengwu Xiong]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2418.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-5-10|PAPER Wed-2-5-10 — Speaker-Conditional Chain Model for Speech Separation and Extraction]]</div>|<div class="cpsessionviewpapertitle">Speaker-Conditional Chain Model for Speech Separation and Extraction</div><div class="cpsessionviewpaperauthor">[[Jing Shi|AUTHOR Jing Shi]], [[Jiaming Xu|AUTHOR Jiaming Xu]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Bo Xu|AUTHOR Bo Xu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Karen Livescu|
|^&nbsp;|^Mikko Kurimo|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0087.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-1|PAPER Wed-2-6-1 — Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images]]</div>|<div class="cpsessionviewpapertitle">Unsupervised vs. Transfer Learning for Multimodal One-Shot Matching of Speech and Images</div><div class="cpsessionviewpaperauthor">[[Leanne Nortje|AUTHOR Leanne Nortje]], [[Herman Kamper|AUTHOR Herman Kamper]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2312.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-2|PAPER Wed-2-6-2 — Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text]]</div>|<div class="cpsessionviewpapertitle">Multimodal Speech Emotion Recognition Using Cross Attention with Aligned Audio and Text</div><div class="cpsessionviewpaperauthor">[[Yoonhyung Lee|AUTHOR Yoonhyung Lee]], [[Seunghyun Yoon|AUTHOR Seunghyun Yoon]], [[Kyomin Jung|AUTHOR Kyomin Jung]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0015.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-3|PAPER Wed-2-6-3 — Speaker Dependent Articulatory-to-Acoustic Mapping Using Real-Time MRI of the Vocal Tract]]</div>|<div class="cpsessionviewpapertitle">Speaker Dependent Articulatory-to-Acoustic Mapping Using Real-Time MRI of the Vocal Tract</div><div class="cpsessionviewpaperauthor">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1031.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-4|PAPER Wed-2-6-4 — Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Ultrasound-Based Articulatory-to-Acoustic Mapping with WaveGlow Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]], [[Csaba Zainkó|AUTHOR Csaba Zainkó]], [[László Tóth|AUTHOR László Tóth]], [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]], [[Alexandra Markó|AUTHOR Alexandra Markó]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1170.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-5|PAPER Wed-2-6-5 — Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Subword Modeling Using Autoregressive Pretraining and Cross-Lingual Phone-Aware Modeling</div><div class="cpsessionviewpaperauthor">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1195.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-6|PAPER Wed-2-6-6 — Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Generative Adversarial Training Data Adaptation for Very Low-Resource Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Kohei Matsuura|AUTHOR Kohei Matsuura]], [[Masato Mimura|AUTHOR Masato Mimura]], [[Shinsuke Sakai|AUTHOR Shinsuke Sakai]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2110.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-7|PAPER Wed-2-6-7 — Neural Speech Completion]]</div>|<div class="cpsessionviewpapertitle">Neural Speech Completion</div><div class="cpsessionviewpaperauthor">[[Kazuki Tsunematsu|AUTHOR Kazuki Tsunematsu]], [[Johanes Effendi|AUTHOR Johanes Effendi]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2629.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-8|PAPER Wed-2-6-8 — Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization]]</div>|<div class="cpsessionviewpapertitle">Improving Unsupervised Sparsespeech Acoustic Models with Categorical Reparameterization</div><div class="cpsessionviewpaperauthor">[[Benjamin Milde|AUTHOR Benjamin Milde]], [[Chris Biemann|AUTHOR Chris Biemann]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2691.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-9|PAPER Wed-2-6-9 — Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning]]</div>|<div class="cpsessionviewpapertitle">Multimodal Sign Language Recognition via Temporal Deformable Convolutional Sequence Learning</div><div class="cpsessionviewpaperauthor">[[Katerina Papadimitriou|AUTHOR Katerina Papadimitriou]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2826.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-6-10|PAPER Wed-2-6-10 — MLS: A Large-Scale Multilingual Dataset for Speech Research]]</div>|<div class="cpsessionviewpapertitle">MLS: A Large-Scale Multilingual Dataset for Speech Research</div><div class="cpsessionviewpaperauthor">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Anuroop Sriram|AUTHOR Anuroop Sriram]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 8|<|
|^Chairs:&nbsp;|^George Saon|
|^&nbsp;|^Abdel-rahman Mohamed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1280.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-1|PAPER Wed-2-8-1 — Semi-Supervised ASR by End-to-End Self-Training]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised ASR by End-to-End Self-Training</div><div class="cpsessionviewpaperauthor">[[Yang Chen|AUTHOR Yang Chen]], [[Weiran Wang|AUTHOR Weiran Wang]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2036.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-2|PAPER Wed-2-8-2 — Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants]]</div>|<div class="cpsessionviewpapertitle">Improved Training Strategies for End-to-End Speech Recognition in Digital Voice Assistants</div><div class="cpsessionviewpaperauthor">[[Hitesh Tulsiani|AUTHOR Hitesh Tulsiani]], [[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Harish Arsikere|AUTHOR Harish Arsikere]], [[Surabhi Punjabi|AUTHOR Surabhi Punjabi]], [[Sri Garimella|AUTHOR Sri Garimella]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-3|PAPER Wed-2-8-3 — Serialized Output Training for End-to-End Overlapped Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Serialized Output Training for End-to-End Overlapped Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Takuya Yoshioka|AUTHOR Takuya Yoshioka]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1337.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-4|PAPER Wed-2-8-4 — Semi-Supervised Learning with Data Augmentation for End-to-End ASR]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Learning with Data Augmentation for End-to-End ASR</div><div class="cpsessionviewpaperauthor">[[Felix Weninger|AUTHOR Felix Weninger]], [[Franco Mana|AUTHOR Franco Mana]], [[Roberto Gemello|AUTHOR Roberto Gemello]], [[Jesús Andrés-Ferrer|AUTHOR Jesús Andrés-Ferrer]], [[Puming Zhan|AUTHOR Puming Zhan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-5|PAPER Wed-2-8-5 — Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Efficient Minimum Word Error Rate Training of RNN-Transducer for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jinxi Guo|AUTHOR Jinxi Guo]], [[Gautam Tiwari|AUTHOR Gautam Tiwari]], [[Jasha Droppo|AUTHOR Jasha Droppo]], [[Maarten Van Segbroeck|AUTHOR Maarten Van Segbroeck]], [[Che-Wei Huang|AUTHOR Che-Wei Huang]], [[Andreas Stolcke|AUTHOR Andreas Stolcke]], [[Roland Maas|AUTHOR Roland Maas]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1855.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-6|PAPER Wed-2-8-6 — A New Training Pipeline for an Improved Neural Transducer]]</div>|<div class="cpsessionviewpapertitle">A New Training Pipeline for an Improved Neural Transducer</div><div class="cpsessionviewpaperauthor">[[Albert Zeyer|AUTHOR Albert Zeyer]], [[André Merboldt|AUTHOR André Merboldt]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-7|PAPER Wed-2-8-7 — Improved Noisy Student Training for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Improved Noisy Student Training for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Daniel S. Park|AUTHOR Daniel S. Park]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Ye Jia|AUTHOR Ye Jia]], [[Wei Han|AUTHOR Wei Han]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Bo Li|AUTHOR Bo Li]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Quoc V. Le|AUTHOR Quoc V. Le]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1930.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-8|PAPER Wed-2-8-8 — Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Phoneme-to-Grapheme Conversion Based Large-Scale Pre-Training for End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ryo Masumura|AUTHOR Ryo Masumura]], [[Naoki Makishima|AUTHOR Naoki Makishima]], [[Mana Ihori|AUTHOR Mana Ihori]], [[Akihiko Takashima|AUTHOR Akihiko Takashima]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Shota Orihashi|AUTHOR Shota Orihashi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3230.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-9|PAPER Wed-2-8-9 — Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Utterance Invariant Training for Hybrid Two-Pass End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Ankur Kumar|AUTHOR Ankur Kumar]], [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]], [[Hejung Yang|AUTHOR Hejung Yang]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Sachin Singh|AUTHOR Sachin Singh]], [[Jiyeon Kim|AUTHOR Jiyeon Kim]], [[Mehul Kumar|AUTHOR Mehul Kumar]], [[Sichen Jin|AUTHOR Sichen Jin]], [[Shatrughan Singh|AUTHOR Shatrughan Singh]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2920.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-8-10|PAPER Wed-2-8-10 — SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR]]</div>|<div class="cpsessionviewpapertitle">SCADA: Stochastic, Consistent and Adversarial Data Augmentation to Improve ASR</div><div class="cpsessionviewpaperauthor">[[Gary Wang|AUTHOR Gary Wang]], [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]], [[Zhehuai Chen|AUTHOR Zhehuai Chen]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Vincent Barriac|
|^&nbsp;|^Changchun Bao|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1067.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-1|PAPER Wed-2-9-1 — Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec]]</div>|<div class="cpsessionviewpapertitle">Fundamental Frequency Model for Postfiltering at Low Bitrates in a Transform-Domain Speech and Audio Codec</div><div class="cpsessionviewpaperauthor">[[Sneha Das|AUTHOR Sneha Das]], [[Tom Bäckström|AUTHOR Tom Bäckström]], [[Guillaume Fuchs|AUTHOR Guillaume Fuchs]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2818.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-2|PAPER Wed-2-9-2 — Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications]]</div>|<div class="cpsessionviewpapertitle">Hearing-Impaired Bio-Inspired Cochlear Models for Real-Time Auditory Applications</div><div class="cpsessionviewpaperauthor">[[Arthur Van Den Broucke|AUTHOR Arthur Van Den Broucke]], [[Deepak Baby|AUTHOR Deepak Baby]], [[Sarah Verhulst|AUTHOR Sarah Verhulst]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-3|PAPER Wed-2-9-3 — Improving Opus Low Bit Rate Quality with Neural Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Improving Opus Low Bit Rate Quality with Neural Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jan Skoglund|AUTHOR Jan Skoglund]], [[Jean-Marc Valin|AUTHOR Jean-Marc Valin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1191.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-4|PAPER Wed-2-9-4 — A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences]]</div>|<div class="cpsessionviewpapertitle">A Differentiable Perceptual Audio Metric Learned from Just Noticeable Differences</div><div class="cpsessionviewpaperauthor">[[Pranay Manocha|AUTHOR Pranay Manocha]], [[Adam Finkelstein|AUTHOR Adam Finkelstein]], [[Richard Zhang|AUTHOR Richard Zhang]], [[Nicholas J. Bryan|AUTHOR Nicholas J. Bryan]], [[Gautham J. Mysore|AUTHOR Gautham J. Mysore]], [[Zeyu Jin|AUTHOR Zeyu Jin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2261.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-5|PAPER Wed-2-9-5 — StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">StoRIR: Stochastic Room Impulse Response Generation for Audio Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Piotr Masztalski|AUTHOR Piotr Masztalski]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]], [[Karol Piaskowski|AUTHOR Karol Piaskowski]], [[Michal Romaniuk|AUTHOR Michal Romaniuk]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2665.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-6|PAPER Wed-2-9-6 — An Open Source Implementation of ITU-T Recommendation P.808 with Validation]]</div>|<div class="cpsessionviewpapertitle">An Open Source Implementation of ITU-T Recommendation P.808 with Validation</div><div class="cpsessionviewpaperauthor">[[Babak Naderi|AUTHOR Babak Naderi]], [[Ross Cutler|AUTHOR Ross Cutler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2760.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-7|PAPER Wed-2-9-7 — DNN No-Reference PSTN Speech Quality Prediction]]</div>|<div class="cpsessionviewpapertitle">DNN No-Reference PSTN Speech Quality Prediction</div><div class="cpsessionviewpaperauthor">[[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Yasaman Hosseinkashi|AUTHOR Yasaman Hosseinkashi]], [[Michael Revow|AUTHOR Michael Revow]], [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]], [[Naglakshmi Chande|AUTHOR Naglakshmi Chande]], [[Robert Aichner|AUTHOR Robert Aichner]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1125.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-2-9-8|PAPER Wed-2-9-8 — Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality]]</div>|<div class="cpsessionviewpapertitle">Non-Intrusive Diagnostic Monitoring of Fullband Speech Quality</div><div class="cpsessionviewpaperauthor">[[Sebastian Möller|AUTHOR Sebastian Möller]], [[Tobias Hübschen|AUTHOR Tobias Hübschen]], [[Thilo Michael|AUTHOR Thilo Michael]], [[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Gerhard Schmidt|AUTHOR Gerhard Schmidt]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 1|<|
|^Chairs:&nbsp;|^Xavier Anguera|
|^&nbsp;|^Donna Erickson|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1282.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-1|PAPER Wed-3-1-1 — Automatic Scoring at Multi-Granularity for L2 Pronunciation]]</div>|<div class="cpsessionviewpapertitle">Automatic Scoring at Multi-Granularity for L2 Pronunciation</div><div class="cpsessionviewpaperauthor">[[Binghuai Lin|AUTHOR Binghuai Lin]], [[Liyuan Wang|AUTHOR Liyuan Wang]], [[Xiaoli Feng|AUTHOR Xiaoli Feng]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1605.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-2|PAPER Wed-3-1-2 — An Effective End-to-End Modeling Approach for Mispronunciation Detection]]</div>|<div class="cpsessionviewpapertitle">An Effective End-to-End Modeling Approach for Mispronunciation Detection</div><div class="cpsessionviewpaperauthor">[[Tien-Hong Lo|AUTHOR Tien-Hong Lo]], [[Shi-Yan Weng|AUTHOR Shi-Yan Weng]], [[Hsiu-Jui Chang|AUTHOR Hsiu-Jui Chang]], [[Berlin Chen|AUTHOR Berlin Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1616.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-3|PAPER Wed-3-1-3 — An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling]]</div>|<div class="cpsessionviewpapertitle">An End-to-End Mispronunciation Detection System for L2 English Speech Leveraging Novel Anti-Phone Modeling</div><div class="cpsessionviewpaperauthor">[[Bi-Cheng Yan|AUTHOR Bi-Cheng Yan]], [[Meng-Che Wu|AUTHOR Meng-Che Wu]], [[Hsiao-Tsung Hung|AUTHOR Hsiao-Tsung Hung]], [[Berlin Chen|AUTHOR Berlin Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1657.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-4|PAPER Wed-3-1-4 — Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Feature Adaptation Using Adversarial Multi-Task Training for Automatic Evaluation of Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Richeng Duan|AUTHOR Richeng Duan]], [[Nancy F. Chen|AUTHOR Nancy F. Chen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2033.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-5|PAPER Wed-3-1-5 — Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning]]</div>|<div class="cpsessionviewpapertitle">Pronunciation Erroneous Tendency Detection with Language Adversarial Represent Learning</div><div class="cpsessionviewpaperauthor">[[Longfei Yang|AUTHOR Longfei Yang]], [[Kaiqi Fu|AUTHOR Kaiqi Fu]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]], [[Takahiro Shinozaki|AUTHOR Takahiro Shinozaki]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2623.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-6|PAPER Wed-3-1-6 — ASR-Free Pronunciation Assessment]]</div>|<div class="cpsessionviewpapertitle">ASR-Free Pronunciation Assessment</div><div class="cpsessionviewpaperauthor">[[Sitong Cheng|AUTHOR Sitong Cheng]], [[Zhixin Liu|AUTHOR Zhixin Liu]], [[Lantian Li|AUTHOR Lantian Li]], [[Zhiyuan Tang|AUTHOR Zhiyuan Tang]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2881.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-7|PAPER Wed-3-1-7 — Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Accent and Lexical Pronunciation Errors in Spontaneous Non-Native English Speech</div><div class="cpsessionviewpaperauthor">[[Konstantinos Kyriakopoulos|AUTHOR Konstantinos Kyriakopoulos]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2953.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-8|PAPER Wed-3-1-8 — Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training]]</div>|<div class="cpsessionviewpapertitle">Context-Aware Goodness of Pronunciation for Computer-Assisted Pronunciation Training</div><div class="cpsessionviewpaperauthor">[[Jiatong Shi|AUTHOR Jiatong Shi]], [[Nan Huo|AUTHOR Nan Huo]], [[Qin Jin|AUTHOR Qin Jin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3109.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-1-9|PAPER Wed-3-1-9 — Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton]]</div>|<div class="cpsessionviewpapertitle">Recognize Mispronunciations to Improve Non-Native Acoustic Modeling Through a Phone Decoder Built from One Edit Distance Finite State Automaton</div><div class="cpsessionviewpaperauthor">[[Wei Chu|AUTHOR Wei Chu]], [[Yang Liu|AUTHOR Yang Liu]], [[Jianwei Zhou|AUTHOR Jianwei Zhou]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 10|<|
|^Chairs:&nbsp;|^Junichi Yamagishi|
|^&nbsp;|^Keikichi Hirose|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1323.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-1|PAPER Wed-3-10-1 — Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network]]</div>|<div class="cpsessionviewpapertitle">Multi-Speaker Emotion Conversion via Latent Variable Regularization and a Chained Encoder-Decoder-Predictor Network</div><div class="cpsessionviewpaperauthor">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Hsi-Wei Hsieh|AUTHOR Hsi-Wei Hsieh]], [[Nicolas Charon|AUTHOR Nicolas Charon]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1325.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-2|PAPER Wed-3-10-2 — Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator]]</div>|<div class="cpsessionviewpapertitle">Non-Parallel Emotion Conversion Using a Deep-Generative Hybrid Network and an Adversarial Pair Discriminator</div><div class="cpsessionviewpaperauthor">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Sager|AUTHOR Jacob Sager]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-3|PAPER Wed-3-10-3 — Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning]]</div>|<div class="cpsessionviewpapertitle">Laughter Synthesis: Combining Seq2seq Modeling with Transfer Learning</div><div class="cpsessionviewpaperauthor">[[Noé Tits|AUTHOR Noé Tits]], [[Kevin El Haddad|AUTHOR Kevin El Haddad]], [[Thierry Dutoit|AUTHOR Thierry Dutoit]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1647.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-4|PAPER Wed-3-10-4 — Nonparallel Emotional Speech Conversion Using VAE-GAN]]</div>|<div class="cpsessionviewpapertitle">Nonparallel Emotional Speech Conversion Using VAE-GAN</div><div class="cpsessionviewpaperauthor">[[Yuexin Cao|AUTHOR Yuexin Cao]], [[Zhengchen Liu|AUTHOR Zhengchen Liu]], [[Minchuan Chen|AUTHOR Minchuan Chen]], [[Jun Ma|AUTHOR Jun Ma]], [[Shaojun Wang|AUTHOR Shaojun Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1854.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-5|PAPER Wed-3-10-5 — Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS]]</div>|<div class="cpsessionviewpapertitle">Principal Style Components: Expressive Style Control and Cross-Speaker Transfer in Neural TTS</div><div class="cpsessionviewpaperauthor">[[Alexander Sorin|AUTHOR Alexander Sorin]], [[Slava Shechtman|AUTHOR Slava Shechtman]], [[Ron Hoory|AUTHOR Ron Hoory]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2014.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-6|PAPER Wed-3-10-6 — Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Converting Anyone’s Emotion: Towards Speaker-Independent Emotional Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Kun Zhou|AUTHOR Kun Zhou]], [[Berrak Sisman|AUTHOR Berrak Sisman]], [[Mingyang Zhang|AUTHOR Mingyang Zhang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2064.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-7|PAPER Wed-3-10-7 — Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet]]</div>|<div class="cpsessionviewpapertitle">Controlling the Strength of Emotions in Speech-Like Emotional Sound Generated by WaveNet</div><div class="cpsessionviewpaperauthor">[[Kento Matsumoto|AUTHOR Kento Matsumoto]], [[Sunao Hara|AUTHOR Sunao Hara]], [[Masanobu Abe|AUTHOR Masanobu Abe]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2228.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-8|PAPER Wed-3-10-8 — Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation]]</div>|<div class="cpsessionviewpapertitle">Learning Syllable-Level Discrete Prosodic Representation for Expressive Speech Generation</div><div class="cpsessionviewpaperauthor">[[Guangyan Zhang|AUTHOR Guangyan Zhang]], [[Ying Qin|AUTHOR Ying Qin]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2262.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-9|PAPER Wed-3-10-9 — Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM]]</div>|<div class="cpsessionviewpapertitle">Simultaneous Conversion of Speaker Identity and Emotion Based on Multiple-Domain Adaptive RBM</div><div class="cpsessionviewpaperauthor">[[Takuya Kishida|AUTHOR Takuya Kishida]], [[Shin Tsukamoto|AUTHOR Shin Tsukamoto]], [[Toru Nakashika|AUTHOR Toru Nakashika]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2423.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-10|PAPER Wed-3-10-10 — Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Exploiting Deep Sentential Context for Expressive End-to-End Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Fengyu Yang|AUTHOR Fengyu Yang]], [[Shan Yang|AUTHOR Shan Yang]], [[Qinghua Wu|AUTHOR Qinghua Wu]], [[Yujun Wang|AUTHOR Yujun Wang]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2477.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-11|PAPER Wed-3-10-11 — Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Hierarchical Multi-Grained Generative Model for Expressive Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Yukiya Hono|AUTHOR Yukiya Hono]], [[Kazuna Tsuboi|AUTHOR Kazuna Tsuboi]], [[Kei Sawada|AUTHOR Kei Sawada]], [[Kei Hashimoto|AUTHOR Kei Hashimoto]], [[Keiichiro Oura|AUTHOR Keiichiro Oura]], [[Yoshihiko Nankaku|AUTHOR Yoshihiko Nankaku]], [[Keiichi Tokuda|AUTHOR Keiichi Tokuda]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2898.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-12|PAPER Wed-3-10-12 — GAN-Based Data Generation for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">GAN-Based Data Generation for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Sefik Emre Eskimez|AUTHOR Sefik Emre Eskimez]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Robert Gmyr|AUTHOR Robert Gmyr]], [[Kenichi Kumanati|AUTHOR Kenichi Kumanati]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3046.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-10-13|PAPER Wed-3-10-13 — The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted]]</div>|<div class="cpsessionviewpapertitle">The Phonetic Bases of Vocal Expressed Emotion: Natural versus Acted</div><div class="cpsessionviewpaperauthor">[[Hira Dhamyal|AUTHOR Hira Dhamyal]], [[Shahan Ali Memon|AUTHOR Shahan Ali Memon]], [[Bhiksha Raj|AUTHOR Bhiksha Raj]], [[Rita Singh|AUTHOR Rita Singh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 12|<|
|^Chairs:&nbsp;|^Chiori Hori|
|^&nbsp;|^Xiao Sun|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1065.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-1|PAPER Wed-3-12-1 — FaceFilter: Audio-Visual Speech Separation Using Still Images]]</div>|<div class="cpsessionviewpapertitle">FaceFilter: Audio-Visual Speech Separation Using Still Images</div><div class="cpsessionviewpaperauthor">[[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Soyeon Choe|AUTHOR Soyeon Choe]], [[Joon Son Chung|AUTHOR Joon Son Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1113.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-2|PAPER Wed-3-12-2 — Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision]]</div>|<div class="cpsessionviewpapertitle">Seeing Voices and Hearing Voices: Learning Discriminative Embeddings Using Cross-Modal Self-Supervision</div><div class="cpsessionviewpaperauthor">[[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]], [[Joon Son Chung|AUTHOR Joon Son Chung]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2117.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-3|PAPER Wed-3-12-3 — Fusion Architectures for Word-Based Audiovisual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Fusion Architectures for Word-Based Audiovisual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Michael Wand|AUTHOR Michael Wand]], [[Jürgen Schmidhuber|AUTHOR Jürgen Schmidhuber]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2346.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-4|PAPER Wed-3-12-4 — Audio-Visual Multi-Channel Recognition of Overlapped Speech]]</div>|<div class="cpsessionviewpapertitle">Audio-Visual Multi-Channel Recognition of Overlapped Speech</div><div class="cpsessionviewpaperauthor">[[Jianwei Yu|AUTHOR Jianwei Yu]], [[Bo Wu|AUTHOR Bo Wu]], [[Rongzhi Gu|AUTHOR Rongzhi Gu]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Yong Xu|AUTHOR Yong Xu]], [[Meng Yu|AUTHOR Meng Yu]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2359.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-5|PAPER Wed-3-12-5 — TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog]]</div>|<div class="cpsessionviewpapertitle">TMT: A Transformer-Based Modal Translator for Improving Multimodal Sequence Representations in Audio Visual Scene-Aware Dialog</div><div class="cpsessionviewpaperauthor">[[Wubo Li|AUTHOR Wubo Li]], [[Dongwei Jiang|AUTHOR Dongwei Jiang]], [[Wei Zou|AUTHOR Wei Zou]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2480.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-6|PAPER Wed-3-12-6 — Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Should we Hard-Code the Recurrence Concept or Learn it Instead ? Exploring the Transformer Architecture for Audio-Visual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[George Sterpu|AUTHOR George Sterpu]], [[Christian Saam|AUTHOR Christian Saam]], [[Naomi Harte|AUTHOR Naomi Harte]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3003.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-7|PAPER Wed-3-12-7 — Resource-Adaptive Deep Learning for Visual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Resource-Adaptive Deep Learning for Visual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Alexandros Koumparoulis|AUTHOR Alexandros Koumparoulis]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]], [[Samuel Thomas|AUTHOR Samuel Thomas]], [[Edmilson da Silva Morais|AUTHOR Edmilson da Silva Morais]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3024.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-8|PAPER Wed-3-12-8 — Speech-Image Semantic Alignment Does Not Depend on Any Prior Classification Tasks]]</div>|<div class="cpsessionviewpapertitle">Speech-Image Semantic Alignment Does Not Depend on Any Prior Classification Tasks</div><div class="cpsessionviewpaperauthor">[[Masood S. Mortazavi|AUTHOR Masood S. Mortazavi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3146.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-9|PAPER Wed-3-12-9 — Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion]]</div>|<div class="cpsessionviewpapertitle">Lip Graph Assisted Audio-Visual Speech Recognition Using Bidirectional Synchronous Fusion</div><div class="cpsessionviewpaperauthor">[[Hong Liu|AUTHOR Hong Liu]], [[Zhan Chen|AUTHOR Zhan Chen]], [[Bing Yang|AUTHOR Bing Yang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3157.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-12-10|PAPER Wed-3-12-10 — Caption Alignment for Low Resource Audio-Visual Data]]</div>|<div class="cpsessionviewpapertitle">Caption Alignment for Low Resource Audio-Visual Data</div><div class="cpsessionviewpaperauthor">[[Vighnesh Reddy Konda|AUTHOR Vighnesh Reddy Konda]], [[Mayur Warialani|AUTHOR Mayur Warialani]], [[Rakesh Prasanth Achari|AUTHOR Rakesh Prasanth Achari]], [[Varad Bhatnagar|AUTHOR Varad Bhatnagar]], [[Jayaprakash Akula|AUTHOR Jayaprakash Akula]], [[Preethi Jyothi|AUTHOR Preethi Jyothi]], [[Ganesh Ramakrishnan|AUTHOR Ganesh Ramakrishnan]], [[Gholamreza Haffari|AUTHOR Gholamreza Haffari]], [[Pankaj Singh|AUTHOR Pankaj Singh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 2|<|
|^Chairs:&nbsp;|^Leibny Paola García-Perera|
|^&nbsp;|^Yawen Xue|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1108.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-1|PAPER Wed-3-2-1 — Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data]]</div>|<div class="cpsessionviewpapertitle">Partial AUC Optimisation Using Recurrent Neural Networks for Music Detection with Limited Training Data</div><div class="cpsessionviewpaperauthor">[[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Victoria Mingote|AUTHOR Victoria Mingote]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1690.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-2|PAPER Wed-3-2-2 — An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings]]</div>|<div class="cpsessionviewpapertitle">An Open-Source Voice Type Classifier for Child-Centered Daylong Recordings</div><div class="cpsessionviewpaperauthor">[[Marvin Lavechin|AUTHOR Marvin Lavechin]], [[Ruben Bousbib|AUTHOR Ruben Bousbib]], [[Hervé Bredin|AUTHOR Hervé Bredin]], [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]], [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-3|PAPER Wed-3-2-3 — Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space]]</div>|<div class="cpsessionviewpapertitle">Competing Speaker Count Estimation on the Fusion of the Spectral and Spatial Embedding Space</div><div class="cpsessionviewpaperauthor">[[Chao Peng|AUTHOR Chao Peng]], [[Xihong Wu|AUTHOR Xihong Wu]], [[Tianshu Qu|AUTHOR Tianshu Qu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1969.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-4|PAPER Wed-3-2-4 — Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework]]</div>|<div class="cpsessionviewpapertitle">Audio-Visual Multi-Speaker Tracking Based on the GLMB Framework</div><div class="cpsessionviewpaperauthor">[[Shoufeng Lin|AUTHOR Shoufeng Lin]], [[Xinyuan Qian|AUTHOR Xinyuan Qian]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-5|PAPER Wed-3-2-5 — Towards Speech Robustness for Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">Towards Speech Robustness for Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Shuo Liu|AUTHOR Shuo Liu]], [[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2430.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-6|PAPER Wed-3-2-6 — Identify Speakers in Cocktail Parties with End-to-End Attention]]</div>|<div class="cpsessionviewpapertitle">Identify Speakers in Cocktail Parties with End-to-End Attention</div><div class="cpsessionviewpaperauthor">[[Junzhe Zhu|AUTHOR Junzhe Zhu]], [[Mark Hasegawa-Johnson|AUTHOR Mark Hasegawa-Johnson]], [[Leda Sarı|AUTHOR Leda Sarı]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2519.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-7|PAPER Wed-3-2-7 — Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR]]</div>|<div class="cpsessionviewpapertitle">Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR</div><div class="cpsessionviewpaperauthor">[[Thilo von Neumann|AUTHOR Thilo von Neumann]], [[Christoph Boeddeker|AUTHOR Christoph Boeddeker]], [[Lukas Drude|AUTHOR Lukas Drude]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2585.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-8|PAPER Wed-3-2-8 — Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">Attentive Convolutional Recurrent Neural Network Using Phoneme-Level Acoustic Representation for Rare Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Shreya G. Upadhyay|AUTHOR Shreya G. Upadhyay]], [[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2671.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-9|PAPER Wed-3-2-9 — Detecting and Counting Overlapping Speakers in Distant Speech Scenarios]]</div>|<div class="cpsessionviewpapertitle">Detecting and Counting Overlapping Speakers in Distant Speech Scenarios</div><div class="cpsessionviewpaperauthor">[[Samuele Cornell|AUTHOR Samuele Cornell]], [[Maurizio Omologo|AUTHOR Maurizio Omologo]], [[Stefano Squartini|AUTHOR Stefano Squartini]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2757.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-2-10|PAPER Wed-3-2-10 — All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection]]</div>|<div class="cpsessionviewpapertitle">All-in-One Transformer: Unifying Speech Recognition, Audio Tagging, and Event Detection</div><div class="cpsessionviewpaperauthor">[[Niko Moritz|AUTHOR Niko Moritz]], [[Gordon Wichern|AUTHOR Gordon Wichern]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 3|<|
|^Chairs:&nbsp;|^Ziping Zhao|
|^&nbsp;|^Xingfeng Li|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2848.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-1|PAPER Wed-3-3-1 — Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals]]</div>|<div class="cpsessionviewpapertitle">Towards Silent Paralinguistics: Deriving Speaking Mode and Speaker ID from Electromyographic Signals</div><div class="cpsessionviewpaperauthor">[[Lorenz Diener|AUTHOR Lorenz Diener]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Catarina Botelho|AUTHOR Catarina Botelho]], [[Kevin Scheck|AUTHOR Kevin Scheck]], [[Dennis Küster|AUTHOR Dennis Küster]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1698.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-2|PAPER Wed-3-3-2 — Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction]]</div>|<div class="cpsessionviewpapertitle">Predicting Collaborative Task Performance Using Graph Interlocutor Acoustic Network in Small Group Interaction</div><div class="cpsessionviewpaperauthor">[[Shun-Chang Zhong|AUTHOR Shun-Chang Zhong]], [[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Wei Huang|AUTHOR Wei Huang]], [[Yi-Ching Liu|AUTHOR Yi-Ching Liu]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2349.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-3|PAPER Wed-3-3-3 — Very Short-Term Conflict Intensity Estimation Using Fisher Vectors]]</div>|<div class="cpsessionviewpapertitle">Very Short-Term Conflict Intensity Estimation Using Fisher Vectors</div><div class="cpsessionviewpaperauthor">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2553.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-4|PAPER Wed-3-3-4 — Gaming Corpus for Studying Social Screams]]</div>|<div class="cpsessionviewpapertitle">Gaming Corpus for Studying Social Screams</div><div class="cpsessionviewpaperauthor">[[Hiroki Mori|AUTHOR Hiroki Mori]], [[Yuki Kikuchi|AUTHOR Yuki Kikuchi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-5|PAPER Wed-3-3-5 — Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability]]</div>|<div class="cpsessionviewpapertitle">Speaker Discrimination in Humans and Machines: Effects of Speaking Style Variability</div><div class="cpsessionviewpaperauthor">[[Amber Afshan|AUTHOR Amber Afshan]], [[Jody Kreiman|AUTHOR Jody Kreiman]], [[Abeer Alwan|AUTHOR Abeer Alwan]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2276.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-6|PAPER Wed-3-3-6 — Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings]]</div>|<div class="cpsessionviewpapertitle">Automatic Prediction of Confidence Level from Children’s Oral Reading Recordings</div><div class="cpsessionviewpaperauthor">[[Kamini Sabu|AUTHOR Kamini Sabu]], [[Preeti Rao|AUTHOR Preeti Rao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2693.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-7|PAPER Wed-3-3-7 — Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech]]</div>|<div class="cpsessionviewpapertitle">Towards a Comprehensive Assessment of Speech Intelligibility for Pathological Speech</div><div class="cpsessionviewpaperauthor">[[W. Xue|AUTHOR W. Xue]], [[V. Mendoza Ramos|AUTHOR V. Mendoza Ramos]], [[W. Harmsen|AUTHOR W. Harmsen]], [[Catia Cucchiarini|AUTHOR Catia Cucchiarini]], [[R.W.N.M. van Hout|AUTHOR R.W.N.M. van Hout]], [[Helmer Strik|AUTHOR Helmer Strik]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1498.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-8|PAPER Wed-3-3-8 — Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers]]</div>|<div class="cpsessionviewpapertitle">Effects of Communication Channels and Actor’s Gender on Emotion Identification by Native Mandarin Speakers</div><div class="cpsessionviewpaperauthor">[[Yi Lin|AUTHOR Yi Lin]], [[Hongwei Ding|AUTHOR Hongwei Ding]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2821.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-3-9|PAPER Wed-3-3-9 — Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor]]</div>|<div class="cpsessionviewpapertitle">Detection of Voicing and Place of Articulation of Fricatives with Deep Learning in a Virtual Speech and Language Therapy Tutor</div><div class="cpsessionviewpaperauthor">[[Ivo Anjos|AUTHOR Ivo Anjos]], [[Maxine Eskenazi|AUTHOR Maxine Eskenazi]], [[Nuno Marques|AUTHOR Nuno Marques]], [[Margarida Grilo|AUTHOR Margarida Grilo]], [[Isabel Guimarães|AUTHOR Isabel Guimarães]], [[João Magalhães|AUTHOR João Magalhães]], [[Sofia Cavaco|AUTHOR Sofia Cavaco]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Sebastien Le Maguer|
|^&nbsp;|^Julia Hirschberg|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1403.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-1|PAPER Wed-3-4-1 — Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Learning for Sequence-to-Sequence Text-to-Speech for Low-Resource Languages</div><div class="cpsessionviewpaperauthor">[[Haitong Zhang|AUTHOR Haitong Zhang]], [[Yue Lin|AUTHOR Yue Lin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1461.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-2|PAPER Wed-3-4-2 — Conditional Spoken Digit Generation with StyleGAN]]</div>|<div class="cpsessionviewpapertitle">Conditional Spoken Digit Generation with StyleGAN</div><div class="cpsessionviewpaperauthor">[[Kasperi Palkama|AUTHOR Kasperi Palkama]], [[Lauri Juvela|AUTHOR Lauri Juvela]], [[Alexander Ilin|AUTHOR Alexander Ilin]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1590.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-3|PAPER Wed-3-4-3 — Towards Universal Text-to-Speech]]</div>|<div class="cpsessionviewpapertitle">Towards Universal Text-to-Speech</div><div class="cpsessionviewpaperauthor">[[Jingzhou Yang|AUTHOR Jingzhou Yang]], [[Lei He|AUTHOR Lei He]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1630.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-4|PAPER Wed-3-4-4 — Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input]]</div>|<div class="cpsessionviewpapertitle">Speaker-Independent Mel-Cepstrum Estimation from Articulator Movements Using D-Vector Input</div><div class="cpsessionviewpaperauthor">[[Kouichi Katsurada|AUTHOR Kouichi Katsurada]], [[Korin Richmond|AUTHOR Korin Richmond]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1751.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-5|PAPER Wed-3-4-5 — Enhancing Monotonicity for Robust Autoregressive Transformer TTS]]</div>|<div class="cpsessionviewpapertitle">Enhancing Monotonicity for Robust Autoregressive Transformer TTS</div><div class="cpsessionviewpaperauthor">[[Xiangyu Liang|AUTHOR Xiangyu Liang]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Runnan Li|AUTHOR Runnan Li]], [[Yanqing Liu|AUTHOR Yanqing Liu]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-6|PAPER Wed-3-4-6 — Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning]]</div>|<div class="cpsessionviewpapertitle">Incremental Text to Speech for Neural Sequence-to-Sequence Models Using Reinforcement Learning</div><div class="cpsessionviewpaperauthor">[[Devang S. Ram Mohan|AUTHOR Devang S. Ram Mohan]], [[Raphael Lenain|AUTHOR Raphael Lenain]], [[Lorenzo Foglianti|AUTHOR Lorenzo Foglianti]], [[Tian Huey Teh|AUTHOR Tian Huey Teh]], [[Marlene Staib|AUTHOR Marlene Staib]], [[Alexandra Torresquintero|AUTHOR Alexandra Torresquintero]], [[Jiameng Gao|AUTHOR Jiameng Gao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1824.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-7|PAPER Wed-3-4-7 — Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Learning for Multi-Speaker Text-to-Speech Synthesis Using Discrete Speech Representation</div><div class="cpsessionviewpaperauthor">[[Tao Tu|AUTHOR Tao Tu]], [[Yuan-Jui Chen|AUTHOR Yuan-Jui Chen]], [[Alexander H. Liu|AUTHOR Alexander H. Liu]], [[Hung-yi Lee|AUTHOR Hung-yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2004.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-8|PAPER Wed-3-4-8 — Learning Joint Articulatory-Acoustic Representations with Normalizing Flows]]</div>|<div class="cpsessionviewpapertitle">Learning Joint Articulatory-Acoustic Representations with Normalizing Flows</div><div class="cpsessionviewpaperauthor">[[Pramit Saha|AUTHOR Pramit Saha]], [[Sidney Fels|AUTHOR Sidney Fels]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2469.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-9|PAPER Wed-3-4-9 — Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Investigating Effective Additional Contextual Factors in DNN-Based Spontaneous Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Yuki Yamashita|AUTHOR Yuki Yamashita]], [[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Yuki Saito|AUTHOR Yuki Saito]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Yusuke Ijima|AUTHOR Yusuke Ijima]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Hiroshi Saruwatari|AUTHOR Hiroshi Saruwatari]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2558.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-4-10|PAPER Wed-3-4-10 — Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification]]</div>|<div class="cpsessionviewpapertitle">Hider-Finder-Combiner: An Adversarial Architecture for General Speech Signal Modification</div><div class="cpsessionviewpaperauthor">[[Jacob J. Webber|AUTHOR Jacob J. Webber]], [[Olivier Perrotin|AUTHOR Olivier Perrotin]], [[Simon King|AUTHOR Simon King]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 5|<|
|^Chairs:&nbsp;|^Man Wai Mak|
|^&nbsp;|^Jesús Villalba|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1287.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-1|PAPER Wed-3-5-1 — Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms]]</div>|<div class="cpsessionviewpapertitle">Wav2Spk: A Simple DNN Architecture for Learning Speaker Embeddings from Waveforms</div><div class="cpsessionviewpaperauthor">[[Weiwei Lin|AUTHOR Weiwei Lin]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1395.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-2|PAPER Wed-3-5-2 — How Does Label Noise Affect the Quality of Speaker Embeddings?]]</div>|<div class="cpsessionviewpapertitle">How Does Label Noise Affect the Quality of Speaker Embeddings?</div><div class="cpsessionviewpaperauthor">[[Minh Pham|AUTHOR Minh Pham]], [[Zeqian Li|AUTHOR Zeqian Li]], [[Jacob Whitehill|AUTHOR Jacob Whitehill]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1765.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-3|PAPER Wed-3-5-3 — A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings]]</div>|<div class="cpsessionviewpapertitle">A Comparative Re-Assessment of Feature Extractors for Deep Speaker Embeddings</div><div class="cpsessionviewpaperauthor">[[Xuechen Liu|AUTHOR Xuechen Liu]], [[Md. Sahidullah|AUTHOR Md. Sahidullah]], [[Tomi Kinnunen|AUTHOR Tomi Kinnunen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1845.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-4|PAPER Wed-3-5-4 — Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations]]</div>|<div class="cpsessionviewpapertitle">Speaker Representation Learning Using Global Context Guided Channel and Time-Frequency Transformations</div><div class="cpsessionviewpaperauthor">[[Wei Xia|AUTHOR Wei Xia]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2075.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-5|PAPER Wed-3-5-5 — Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework]]</div>|<div class="cpsessionviewpapertitle">Intra-Class Variation Reduction of Speaker Representation in Disentanglement Framework</div><div class="cpsessionviewpaperauthor">[[Yoohwan Kwon|AUTHOR Yoohwan Kwon]], [[Soo-Whan Chung|AUTHOR Soo-Whan Chung]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2106.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-6|PAPER Wed-3-5-6 — Compact Speaker Embedding: lrx-Vector]]</div>|<div class="cpsessionviewpapertitle">Compact Speaker Embedding: lrx-Vector</div><div class="cpsessionviewpaperauthor">[[Munir Georges|AUTHOR Munir Georges]], [[Jonathan Huang|AUTHOR Jonathan Huang]], [[Tobias Bocklet|AUTHOR Tobias Bocklet]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2270.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-7|PAPER Wed-3-5-7 — Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings]]</div>|<div class="cpsessionviewpapertitle">Cosine-Distance Virtual Adversarial Training for Semi-Supervised Speaker-Discriminative Acoustic Embeddings</div><div class="cpsessionviewpaperauthor">[[Florian L. Kreyssig|AUTHOR Florian L. Kreyssig]], [[Philip C. Woodland|AUTHOR Philip C. Woodland]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2470.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-8|PAPER Wed-3-5-8 — Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Deep Speaker Embedding with Long Short Term Centroid Learning for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Junyi Peng|AUTHOR Junyi Peng]], [[Rongzhi Gu|AUTHOR Rongzhi Gu]], [[Yuexian Zou|AUTHOR Yuexian Zou]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2542.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-9|PAPER Wed-3-5-9 — Neural Discriminant Analysis for Deep Speaker Embedding]]</div>|<div class="cpsessionviewpapertitle">Neural Discriminant Analysis for Deep Speaker Embedding</div><div class="cpsessionviewpaperauthor">[[Lantian Li|AUTHOR Lantian Li]], [[Dong Wang|AUTHOR Dong Wang]], [[Thomas Fang Zheng|AUTHOR Thomas Fang Zheng]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2970.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-5-10|PAPER Wed-3-5-10 — Learning Speaker Embedding from Text-to-Speech]]</div>|<div class="cpsessionviewpapertitle">Learning Speaker Embedding from Text-to-Speech</div><div class="cpsessionviewpaperauthor">[[Jaejin Cho|AUTHOR Jaejin Cho]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Hanseok Ko|
|^&nbsp;|^Zhao Lv|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2952.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-1|PAPER Wed-3-7-1 — Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention]]</div>|<div class="cpsessionviewpapertitle">Noisy-Reverberant Speech Enhancement Using DenseUNet with Time-Frequency Attention</div><div class="cpsessionviewpaperauthor">[[Yan Zhao|AUTHOR Yan Zhao]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1169.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-2|PAPER Wed-3-7-2 — On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems]]</div>|<div class="cpsessionviewpapertitle">On Loss Functions and Recurrency Training for GAN-Based Speech Enhancement Systems</div><div class="cpsessionviewpaperauthor">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]], [[Chengyun Deng|AUTHOR Chengyun Deng]], [[Yi Shen|AUTHOR Yi Shen]], [[Donald S. Williamson|AUTHOR Donald S. Williamson]], [[Yongtao Sha|AUTHOR Yongtao Sha]], [[Yi Zhang|AUTHOR Yi Zhang]], [[Hui Song|AUTHOR Hui Song]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-3|PAPER Wed-3-7-3 — Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Adversarial Multi-Task Learning for Vocoder-Based Monaural Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Zhihao Du|AUTHOR Zhihao Du]], [[Ming Lei|AUTHOR Ming Lei]], [[Jiqing Han|AUTHOR Jiqing Han]], [[Shiliang Zhang|AUTHOR Shiliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1532.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-4|PAPER Wed-3-7-4 — Deep Speech Inpainting of Time-Frequency Masks]]</div>|<div class="cpsessionviewpapertitle">Deep Speech Inpainting of Time-Frequency Masks</div><div class="cpsessionviewpaperauthor">[[Mikolaj Kegler|AUTHOR Mikolaj Kegler]], [[Pierre Beckmann|AUTHOR Pierre Beckmann]], [[Milos Cernak|AUTHOR Milos Cernak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1901.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-5|PAPER Wed-3-7-5 — Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices]]</div>|<div class="cpsessionviewpapertitle">Real-Time Single-Channel Deep Neural Network-Based Speech Enhancement on Edge Devices</div><div class="cpsessionviewpaperauthor">[[Nikhil Shankar|AUTHOR Nikhil Shankar]], [[Gautam Shreedhar Bhat|AUTHOR Gautam Shreedhar Bhat]], [[Issa M.S. Panahi|AUTHOR Issa M.S. Panahi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-6|PAPER Wed-3-7-6 — Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning]]</div>|<div class="cpsessionviewpapertitle">Improved Speech Enhancement Using a Time-Domain GAN with Mask Learning</div><div class="cpsessionviewpaperauthor">[[Ju Lin|AUTHOR Ju Lin]], [[Sufeng Niu|AUTHOR Sufeng Niu]], [[Adriaan J. van Wijngaarden|AUTHOR Adriaan J. van Wijngaarden]], [[Jerome L. McClendon|AUTHOR Jerome L. McClendon]], [[Melissa C. Smith|AUTHOR Melissa C. Smith]], [[Kuang-Ching Wang|AUTHOR Kuang-Ching Wang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-8|PAPER Wed-3-7-8 — Real Time Speech Enhancement in the Waveform Domain]]</div>|<div class="cpsessionviewpapertitle">Real Time Speech Enhancement in the Waveform Domain</div><div class="cpsessionviewpaperauthor">[[Alexandre Défossez|AUTHOR Alexandre Défossez]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Yossi Adi|AUTHOR Yossi Adi]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2443.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-7-9|PAPER Wed-3-7-9 — Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks]]</div>|<div class="cpsessionviewpapertitle">Efficient Low-Latency Speech Enhancement with Mobile Audio Streaming Networks</div><div class="cpsessionviewpaperauthor">[[Michal Romaniuk|AUTHOR Michal Romaniuk]], [[Piotr Masztalski|AUTHOR Piotr Masztalski]], [[Karol Piaskowski|AUTHOR Karol Piaskowski]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 8|<|
|^Chairs:&nbsp;|^Xianxian Zhang|
|^&nbsp;|^Keisuke Kinoshita|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1199.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-1|PAPER Wed-3-8-1 — Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Stream Attention-Based BLSTM with Feature Segmentation for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Yuya Chiba|AUTHOR Yuya Chiba]], [[Takashi Nose|AUTHOR Takashi Nose]], [[Akinori Ito|AUTHOR Akinori Ito]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1351.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-2|PAPER Wed-3-8-2 — Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers]]</div>|<div class="cpsessionviewpapertitle">Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers</div><div class="cpsessionviewpaperauthor">[[Guanjun Li|AUTHOR Guanjun Li]], [[Shan Liang|AUTHOR Shan Liang]], [[Shuai Nie|AUTHOR Shuai Nie]], [[Wenju Liu|AUTHOR Wenju Liu]], [[Zhanlei Yang|AUTHOR Zhanlei Yang]], [[Longshuai Xiao|AUTHOR Longshuai Xiao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1365.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-3|PAPER Wed-3-8-3 — Similarity-and-Independence-Aware Beamformer: Method for Target Source Extraction Using Magnitude Spectrogram as Reference]]</div>|<div class="cpsessionviewpapertitle">Similarity-and-Independence-Aware Beamformer: Method for Target Source Extraction Using Magnitude Spectrogram as Reference</div><div class="cpsessionviewpaperauthor">[[Atsuo Hiroe|AUTHOR Atsuo Hiroe]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1409.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-4|PAPER Wed-3-8-4 — The Method of Random Directions Optimization for Stereo Audio Source Separation]]</div>|<div class="cpsessionviewpapertitle">The Method of Random Directions Optimization for Stereo Audio Source Separation</div><div class="cpsessionviewpaperauthor">[[Oleg Golokolenko|AUTHOR Oleg Golokolenko]], [[Gerald Schuller|AUTHOR Gerald Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1548.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-5|PAPER Wed-3-8-5 — Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations]]</div>|<div class="cpsessionviewpapertitle">Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations</div><div class="cpsessionviewpaperauthor">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-6|PAPER Wed-3-8-6 — Generalized Minimal Distortion Principle for Blind Source Separation]]</div>|<div class="cpsessionviewpapertitle">Generalized Minimal Distortion Principle for Blind Source Separation</div><div class="cpsessionviewpaperauthor">[[Robin Scheibler|AUTHOR Robin Scheibler]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2408.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-7|PAPER Wed-3-8-7 — A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">A Lightweight Model Based on Separable Convolution for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Ying Zhong|AUTHOR Ying Zhong]], [[Ying Hu|AUTHOR Ying Hu]], [[Hao Huang|AUTHOR Hao Huang]], [[Wushour Silamu|AUTHOR Wushour Silamu]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2624.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-8|PAPER Wed-3-8-8 — Meta Multi-Task Learning for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Meta Multi-Task Learning for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Ruichu Cai|AUTHOR Ruichu Cai]], [[Kaibin Guo|AUTHOR Kaibin Guo]], [[Boyan Xu|AUTHOR Boyan Xu]], [[Xiaoyan Yang|AUTHOR Xiaoyan Yang]], [[Zhenjie Zhang|AUTHOR Zhenjie Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2687.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-8-9|PAPER Wed-3-8-9 — GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones]]</div>|<div class="cpsessionviewpapertitle">GEV Beamforming Supported by DOA-Based Masks Generated on Pairs of Microphones</div><div class="cpsessionviewpaperauthor">[[François Grondin|AUTHOR François Grondin]], [[Jean-Samuel Lauzon|AUTHOR Jean-Samuel Lauzon]], [[Jonathan Vincent|AUTHOR Jonathan Vincent]], [[François Michaud|AUTHOR François Michaud]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 9|<|
|^Chairs:&nbsp;|^Yanmin Qian|
|^&nbsp;|^Chanwoo Kim|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1491.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-1|PAPER Wed-3-9-1 — Accurate Detection of Wake Word Start and End Using a CNN]]</div>|<div class="cpsessionviewpapertitle">Accurate Detection of Wake Word Start and End Using a CNN</div><div class="cpsessionviewpaperauthor">[[Christin Jose|AUTHOR Christin Jose]], [[Yuriy Mishchenko|AUTHOR Yuriy Mishchenko]], [[Thibaud Sénéchal|AUTHOR Thibaud Sénéchal]], [[Anish Shah|AUTHOR Anish Shah]], [[Alex Escott|AUTHOR Alex Escott]], [[Shiv Naga Prasad Vitaladevuni|AUTHOR Shiv Naga Prasad Vitaladevuni]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1330.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-2|PAPER Wed-3-9-2 — Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering]]</div>|<div class="cpsessionviewpapertitle">Hybrid Transformer/CTC Networks for Hardware Efficient Voice Triggering</div><div class="cpsessionviewpaperauthor">[[Saurabh Adya|AUTHOR Saurabh Adya]], [[Vineet Garg|AUTHOR Vineet Garg]], [[Siddharth Sigtia|AUTHOR Siddharth Sigtia]], [[Pramod Simha|AUTHOR Pramod Simha]], [[Chandra Dhir|AUTHOR Chandra Dhir]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1058.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-3|PAPER Wed-3-9-3 — MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition]]</div>|<div class="cpsessionviewpapertitle">MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition</div><div class="cpsessionviewpaperauthor">[[Somshubra Majumdar|AUTHOR Somshubra Majumdar]], [[Boris Ginsburg|AUTHOR Boris Ginsburg]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1894.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-4|PAPER Wed-3-9-4 — Iterative Compression of End-to-End ASR Model Using AutoML]]</div>|<div class="cpsessionviewpapertitle">Iterative Compression of End-to-End ASR Model Using AutoML</div><div class="cpsessionviewpaperauthor">[[Abhinav Mehrotra|AUTHOR Abhinav Mehrotra]], [[Łukasz Dudziak|AUTHOR Łukasz Dudziak]], [[Jinsu Yeo|AUTHOR Jinsu Yeo]], [[Young-yoon Lee|AUTHOR Young-yoon Lee]], [[Ravichander Vipperla|AUTHOR Ravichander Vipperla]], [[Mohamed S. Abdelfattah|AUTHOR Mohamed S. Abdelfattah]], [[Sourav Bhattacharya|AUTHOR Sourav Bhattacharya]], [[Samin Ishtiaq|AUTHOR Samin Ishtiaq]], [[Alberto Gil C.P. Ramos|AUTHOR Alberto Gil C.P. Ramos]], [[SangJeong Lee|AUTHOR SangJeong Lee]], [[Daehyun Kim|AUTHOR Daehyun Kim]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1991.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-5|PAPER Wed-3-9-5 — Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Quantization Aware Training with Absolute-Cosine Regularization for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Hieu Duy Nguyen|AUTHOR Hieu Duy Nguyen]], [[Anastasios Alexandridis|AUTHOR Anastasios Alexandridis]], [[Athanasios Mouchtaris|AUTHOR Athanasios Mouchtaris]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3172.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-6|PAPER Wed-3-9-6 — Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing]]</div>|<div class="cpsessionviewpapertitle">Streaming On-Device End-to-End ASR System for Privacy-Sensitive Voice-Typing</div><div class="cpsessionviewpaperauthor">[[Abhinav Garg|AUTHOR Abhinav Garg]], [[Gowtham P. Vadisetti|AUTHOR Gowtham P. Vadisetti]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Sichen Jin|AUTHOR Sichen Jin]], [[Aditya Jayasimha|AUTHOR Aditya Jayasimha]], [[Youngho Han|AUTHOR Youngho Han]], [[Jiyeon Kim|AUTHOR Jiyeon Kim]], [[Junmo Park|AUTHOR Junmo Park]], [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]], [[Sooyeon Kim|AUTHOR Sooyeon Kim]], [[Young-yoon Lee|AUTHOR Young-yoon Lee]], [[Kyungbo Min|AUTHOR Kyungbo Min]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2840.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-7|PAPER Wed-3-9-7 — Scaling Up Online Speech Recognition Using ConvNets]]</div>|<div class="cpsessionviewpapertitle">Scaling Up Online Speech Recognition Using ConvNets</div><div class="cpsessionviewpaperauthor">[[Vineel Pratap|AUTHOR Vineel Pratap]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Jacob Kahn|AUTHOR Jacob Kahn]], [[Gilad Avidov|AUTHOR Gilad Avidov]], [[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Awni Hannun|AUTHOR Awni Hannun]], [[Vitaliy Liptchinsky|AUTHOR Vitaliy Liptchinsky]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1600.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-8|PAPER Wed-3-9-8 — Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ye Bai|AUTHOR Ye Bai]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Shuai Zhang|AUTHOR Shuai Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1939.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-3-9-9|PAPER Wed-3-9-9 — Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models]]</div>|<div class="cpsessionviewpapertitle">Rescore in a Flash: Compact, Cache Efficient Hashing Data Structures for n-Gram Language Models</div><div class="cpsessionviewpaperauthor">[[Grant P. Strimel|AUTHOR Grant P. Strimel]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]], [[Gautam Tiwari|AUTHOR Gautam Tiwari]], [[Adrien Piérard|AUTHOR Adrien Piérard]], [[Jon Webb|AUTHOR Jon Webb]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 12|<|
|^Chairs:&nbsp;|^Ross Cutler|
|^&nbsp;|^Chandan Karadagur Ananda Reddy|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2091.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-1|PAPER Wed-SS-1-12-1 — Online Monaural Speech Enhancement Using Delayed Subband LSTM]]</div>|<div class="cpsessionviewpapertitle">Online Monaural Speech Enhancement Using Delayed Subband LSTM</div><div class="cpsessionviewpaperauthor">[[Xiaofei Li|AUTHOR Xiaofei Li]], [[Radu Horaud|AUTHOR Radu Horaud]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2439.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-2|PAPER Wed-SS-1-12-2 — INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising]]</div>|<div class="cpsessionviewpapertitle">INTERSPEECH 2020 Deep Noise Suppression Challenge: A Fully Convolutional Recurrent Network (FCRN) for Joint Dereverberation and Denoising</div><div class="cpsessionviewpaperauthor">[[Maximilian Strake|AUTHOR Maximilian Strake]], [[Bruno Defraene|AUTHOR Bruno Defraene]], [[Kristoff Fluyt|AUTHOR Kristoff Fluyt]], [[Wouter Tirry|AUTHOR Wouter Tirry]], [[Tim Fingscheidt|AUTHOR Tim Fingscheidt]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2537.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-3|PAPER Wed-SS-1-12-3 — DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Yanxin Hu|AUTHOR Yanxin Hu]], [[Yun Liu|AUTHOR Yun Liu]], [[Shubo Lv|AUTHOR Shubo Lv]], [[Mengtao Xing|AUTHOR Mengtao Xing]], [[Shimin Zhang|AUTHOR Shimin Zhang]], [[Yihui Fu|AUTHOR Yihui Fu]], [[Jian Wu|AUTHOR Jian Wu]], [[Bihong Zhang|AUTHOR Bihong Zhang]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2631.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-4|PAPER Wed-SS-1-12-4 — Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression]]</div>|<div class="cpsessionviewpapertitle">Dual-Signal Transformation LSTM Network for Real-Time Noise Suppression</div><div class="cpsessionviewpaperauthor">[[Nils L. Westhausen|AUTHOR Nils L. Westhausen]], [[Bernd T. Meyer|AUTHOR Bernd T. Meyer]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2730.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-5|PAPER Wed-SS-1-12-5 — A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech]]</div>|<div class="cpsessionviewpapertitle">A Perceptually-Motivated Approach for Low-Complexity, Real-Time Enhancement of Fullband Speech</div><div class="cpsessionviewpaperauthor">[[Jean-Marc Valin|AUTHOR Jean-Marc Valin]], [[Umut Isik|AUTHOR Umut Isik]], [[Neerad Phansalkar|AUTHOR Neerad Phansalkar]], [[Ritwik Giri|AUTHOR Ritwik Giri]], [[Karim Helwani|AUTHOR Karim Helwani]], [[Arvindh Krishnaswamy|AUTHOR Arvindh Krishnaswamy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3027.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-6|PAPER Wed-SS-1-12-6 — PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss]]</div>|<div class="cpsessionviewpapertitle">PoCoNet: Better Speech Enhancement with Frequency-Positional Embeddings, Semi-Supervised Conversational Data, and Biased Loss</div><div class="cpsessionviewpaperauthor">[[Umut Isik|AUTHOR Umut Isik]], [[Ritwik Giri|AUTHOR Ritwik Giri]], [[Neerad Phansalkar|AUTHOR Neerad Phansalkar]], [[Jean-Marc Valin|AUTHOR Jean-Marc Valin]], [[Karim Helwani|AUTHOR Karim Helwani]], [[Arvindh Krishnaswamy|AUTHOR Arvindh Krishnaswamy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3038.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-12-7|PAPER Wed-SS-1-12-7 — The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results]]</div>|<div class="cpsessionviewpapertitle">The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results</div><div class="cpsessionviewpaperauthor">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Vishak Gopal|AUTHOR Vishak Gopal]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Ebrahim Beyrami|AUTHOR Ebrahim Beyrami]], [[Roger Cheng|AUTHOR Roger Cheng]], [[Harishchandra Dubey|AUTHOR Harishchandra Dubey]], [[Sergiy Matusevych|AUTHOR Sergiy Matusevych]], [[Robert Aichner|AUTHOR Robert Aichner]], [[Ashkan Aazami|AUTHOR Ashkan Aazami]], [[Sebastian Braun|AUTHOR Sebastian Braun]], [[Puneet Rana|AUTHOR Puneet Rana]], [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 4|<|
|^Chairs:&nbsp;|^Björn Schuller|
|^&nbsp;|^Anton Batliner|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/0032.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-1|PAPER Wed-SS-1-4-1 — The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks]]</div>|<div class="cpsessionviewpapertitle">The INTERSPEECH 2020 Computational Paralinguistics Challenge: Elderly Emotion, Breathing & Masks</div><div class="cpsessionviewpaperauthor">[[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Anton Batliner|AUTHOR Anton Batliner]], [[Christian Bergler|AUTHOR Christian Bergler]], [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]], [[Antonia Hamilton|AUTHOR Antonia Hamilton]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Alice Baird|AUTHOR Alice Baird]], [[Georgios Rizos|AUTHOR Georgios Rizos]], [[Maximilian Schmitt|AUTHOR Maximilian Schmitt]], [[Lukas Stappen|AUTHOR Lukas Stappen]], [[Harald Baumeister|AUTHOR Harald Baumeister]], [[Alexis Deighton MacIntyre|AUTHOR Alexis Deighton MacIntyre]], [[Simone Hantke|AUTHOR Simone Hantke]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1552.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-2|PAPER Wed-SS-1-4-2 — Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task]]</div>|<div class="cpsessionviewpapertitle">Learning Higher Representations from Pre-Trained Deep Models with Data Augmentation for the COMPARE 2020 Challenge Mask Task</div><div class="cpsessionviewpaperauthor">[[Tomoya Koike|AUTHOR Tomoya Koike]], [[Kun Qian|AUTHOR Kun Qian]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Yoshiharu Yamamoto|AUTHOR Yoshiharu Yamamoto]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1692.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-3|PAPER Wed-SS-1-4-3 — Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms]]</div>|<div class="cpsessionviewpapertitle">Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms</div><div class="cpsessionviewpaperauthor">[[Steffen Illium|AUTHOR Steffen Illium]], [[Robert Müller|AUTHOR Robert Müller]], [[Andreas Sedlmeier|AUTHOR Andreas Sedlmeier]], [[Claudia Linnhoff-Popien|AUTHOR Claudia Linnhoff-Popien]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1723.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-4|PAPER Wed-SS-1-4-4 — Surgical Mask Detection with Deep Recurrent Phonetic Models]]</div>|<div class="cpsessionviewpapertitle">Surgical Mask Detection with Deep Recurrent Phonetic Models</div><div class="cpsessionviewpaperauthor">[[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[Tomás Arias-Vergara|AUTHOR Tomás Arias-Vergara]], [[Juan Camilo Vásquez-Correa|AUTHOR Juan Camilo Vásquez-Correa]], [[Paula Andrea Pérez-Toro|AUTHOR Paula Andrea Pérez-Toro]], [[Florian Hönig|AUTHOR Florian Hönig]], [[Elmar Nöth|AUTHOR Elmar Nöth]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2243.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-5|PAPER Wed-SS-1-4-5 — Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge]]</div>|<div class="cpsessionviewpapertitle">Phonetic, Frame Clustering and Intelligibility Analyses for the INTERSPEECH 2020 ComParE Challenge</div><div class="cpsessionviewpaperauthor">[[Claude Montacié|AUTHOR Claude Montacié]], [[Marie-José Caraty|AUTHOR Marie-José Caraty]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2290.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-6|PAPER Wed-SS-1-4-6 — Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploring Text and Audio Embeddings for Multi-Dimension Elderly Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Mariana Julião|AUTHOR Mariana Julião]], [[Alberto Abad|AUTHOR Alberto Abad]], [[Helena Moniz|AUTHOR Helena Moniz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-7|PAPER Wed-SS-1-4-7 — Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges]]</div>|<div class="cpsessionviewpapertitle">Ensembling End-to-End Deep Models for Computational Paralinguistics Tasks: ComParE 2020 Mask and Breathing Sub-Challenges</div><div class="cpsessionviewpaperauthor">[[Maxim Markitantov|AUTHOR Maxim Markitantov]], [[Denis Dresvyanskiy|AUTHOR Denis Dresvyanskiy]], [[Danila Mamontov|AUTHOR Danila Mamontov]], [[Heysem Kaya|AUTHOR Heysem Kaya]], [[Wolfgang Minker|AUTHOR Wolfgang Minker]], [[Alexey Karpov|AUTHOR Alexey Karpov]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2778.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-8|PAPER Wed-SS-1-4-8 — Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge]]</div>|<div class="cpsessionviewpapertitle">Analyzing Breath Signals for the Interspeech 2020 ComParE Challenge</div><div class="cpsessionviewpaperauthor">[[John Mendonça|AUTHOR John Mendonça]], [[Francisco Teixeira|AUTHOR Francisco Teixeira]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]], [[Alberto Abad|AUTHOR Alberto Abad]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2832.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-9|PAPER Wed-SS-1-4-9 — Deep Attentive End-to-End Continuous Breath Sensing from Speech]]</div>|<div class="cpsessionviewpapertitle">Deep Attentive End-to-End Continuous Breath Sensing from Speech</div><div class="cpsessionviewpaperauthor">[[Alexis Deighton MacIntyre|AUTHOR Alexis Deighton MacIntyre]], [[Georgios Rizos|AUTHOR Georgios Rizos]], [[Anton Batliner|AUTHOR Anton Batliner]], [[Alice Baird|AUTHOR Alice Baird]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Antonia Hamilton|AUTHOR Antonia Hamilton]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2857.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-10|PAPER Wed-SS-1-4-10 — Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion]]</div>|<div class="cpsessionviewpapertitle">Paralinguistic Classification of Mask Wearing by Image Classifiers and Fusion</div><div class="cpsessionviewpaperauthor">[[Jeno Szep|AUTHOR Jeno Szep]], [[Salim Hariri|AUTHOR Salim Hariri]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2999.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-11|PAPER Wed-SS-1-4-11 — Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge]]</div>|<div class="cpsessionviewpapertitle">Exploration of Acoustic and Lexical Cues for the INTERSPEECH 2020 Computational Paralinguistic Challenge</div><div class="cpsessionviewpaperauthor">[[Ziqing Yang|AUTHOR Ziqing Yang]], [[Zifan An|AUTHOR Zifan An]], [[Zehao Fan|AUTHOR Zehao Fan]], [[Chengye Jing|AUTHOR Chengye Jing]], [[Houwei Cao|AUTHOR Houwei Cao]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3160.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-12|PAPER Wed-SS-1-4-12 — Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Is Everything Fine, Grandma? Acoustic and Linguistic Modeling for Robust Elderly Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Gizem Soğancıoğlu|AUTHOR Gizem Soğancıoğlu]], [[Oxana Verkholyak|AUTHOR Oxana Verkholyak]], [[Heysem Kaya|AUTHOR Heysem Kaya]], [[Dmitrii Fedotov|AUTHOR Dmitrii Fedotov]], [[Tobias Cadée|AUTHOR Tobias Cadée]], [[Albert Ali Salah|AUTHOR Albert Ali Salah]], [[Alexey Karpov|AUTHOR Alexey Karpov]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1329.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-4-13|PAPER Wed-SS-1-4-13 — Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs]]</div>|<div class="cpsessionviewpapertitle">Are you Wearing a Mask? Improving Mask Detection from Speech Using Augmentation by Cycle-Consistent GANs</div><div class="cpsessionviewpaperauthor">[[Nicolae-Cătălin Ristea|AUTHOR Nicolae-Cătălin Ristea]], [[Radu Tudor Ionescu|AUTHOR Radu Tudor Ionescu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|19:15–20:15, Wednesday 28 October 2020, Room 6|<|
|^Chairs:&nbsp;|^Isabel Trancoso|
|^&nbsp;|^Nick Campbell|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2202.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-1|PAPER Wed-SS-1-6-1 — Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia]]</div>|<div class="cpsessionviewpapertitle">Tackling the ADReSS Challenge: A Multimodal Approach to the Automated Recognition of Alzheimer’s Dementia</div><div class="cpsessionviewpaperauthor">[[Matej Martinc|AUTHOR Matej Martinc]], [[Senja Pollak|AUTHOR Senja Pollak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2516.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-2|PAPER Wed-SS-1-6-2 — Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease]]</div>|<div class="cpsessionviewpapertitle">Disfluencies and Fine-Tuning Pre-Trained Language Models for Detection of Alzheimer’s Disease</div><div class="cpsessionviewpaperauthor">[[Jiahong Yuan|AUTHOR Jiahong Yuan]], [[Yuchen Bian|AUTHOR Yuchen Bian]], [[Xingyu Cai|AUTHOR Xingyu Cai]], [[Jiaji Huang|AUTHOR Jiaji Huang]], [[Zheng Ye|AUTHOR Zheng Ye]], [[Kenneth Church|AUTHOR Kenneth Church]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2557.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-3|PAPER Wed-SS-1-6-3 — To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection]]</div>|<div class="cpsessionviewpapertitle">To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer’s Disease Detection</div><div class="cpsessionviewpaperauthor">[[Aparna Balagopalan|AUTHOR Aparna Balagopalan]], [[Benjamin Eyre|AUTHOR Benjamin Eyre]], [[Frank Rudzicz|AUTHOR Frank Rudzicz]], [[Jekaterina Novikova|AUTHOR Jekaterina Novikova]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2571.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-4|PAPER Wed-SS-1-6-4 — Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge]]</div>|<div class="cpsessionviewpapertitle">Alzheimer’s Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge</div><div class="cpsessionviewpaperauthor">[[Saturnino Luz|AUTHOR Saturnino Luz]], [[Fasih Haider|AUTHOR Fasih Haider]], [[Sofia de la Fuente|AUTHOR Sofia de la Fuente]], [[Davida Fromm|AUTHOR Davida Fromm]], [[Brian MacWhinney|AUTHOR Brian MacWhinney]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2587.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-5|PAPER Wed-SS-1-6-5 — Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity]]</div>|<div class="cpsessionviewpapertitle">Using State of the Art Speaker Recognition and Natural Language Processing Technologies to Detect Alzheimer’s Disease and Assess its Severity</div><div class="cpsessionviewpaperauthor">[[Raghavendra Pappagari|AUTHOR Raghavendra Pappagari]], [[Jaejin Cho|AUTHOR Jaejin Cho]], [[Laureano Moro-Velázquez|AUTHOR Laureano Moro-Velázquez]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2635.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-6|PAPER Wed-SS-1-6-6 — A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition]]</div>|<div class="cpsessionviewpapertitle">A Comparison of Acoustic and Linguistics Methodologies for Alzheimer’s Dementia Recognition</div><div class="cpsessionviewpaperauthor">[[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Yilin Pan|AUTHOR Yilin Pan]], [[Zhao Ren|AUTHOR Zhao Ren]], [[Julian Fritsch|AUTHOR Julian Fritsch]], [[Venkata Srikanth Nallanthighal|AUTHOR Venkata Srikanth Nallanthighal]], [[Heidi Christensen|AUTHOR Heidi Christensen]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]], [[Helmer Strik|AUTHOR Helmer Strik]], [[Aki Härmä|AUTHOR Aki Härmä]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2721.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-7|PAPER Wed-SS-1-6-7 — Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech]]</div>|<div class="cpsessionviewpapertitle">Multi-Modal Fusion with Gating Using Audio, Lexical and Disfluency Features for Alzheimer’s Dementia Recognition from Spontaneous Speech</div><div class="cpsessionviewpaperauthor">[[Morteza Rohanian|AUTHOR Morteza Rohanian]], [[Julian Hough|AUTHOR Julian Hough]], [[Matthew Purver|AUTHOR Matthew Purver]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2729.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-8|PAPER Wed-SS-1-6-8 — Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech]]</div>|<div class="cpsessionviewpapertitle">Comparing Natural Language Processing Techniques for Alzheimer’s Dementia Prediction in Spontaneous Speech</div><div class="cpsessionviewpaperauthor">[[Thomas Searle|AUTHOR Thomas Searle]], [[Zina Ibrahim|AUTHOR Zina Ibrahim]], [[Richard Dobson|AUTHOR Richard Dobson]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2781.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-9|PAPER Wed-SS-1-6-9 — Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech]]</div>|<div class="cpsessionviewpapertitle">Multiscale System for Alzheimer’s Dementia Recognition Through Spontaneous Speech</div><div class="cpsessionviewpaperauthor">[[Erik Edwards|AUTHOR Erik Edwards]], [[Charles Dognin|AUTHOR Charles Dognin]], [[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]], [[Maneesh Singh|AUTHOR Maneesh Singh]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2833.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-10|PAPER Wed-SS-1-6-10 — The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge]]</div>|<div class="cpsessionviewpapertitle">The INESC-ID Multi-Modal System for the ADReSS 2020 Challenge</div><div class="cpsessionviewpaperauthor">[[Anna Pompili|AUTHOR Anna Pompili]], [[Thomas Rolland|AUTHOR Thomas Rolland]], [[Alberto Abad|AUTHOR Alberto Abad]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3085.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-11|PAPER Wed-SS-1-6-11 — Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues]]</div>|<div class="cpsessionviewpapertitle">Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues</div><div class="cpsessionviewpaperauthor">[[Shahla Farzana|AUTHOR Shahla Farzana]], [[Natalie Parde|AUTHOR Natalie Parde]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3137.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-12|PAPER Wed-SS-1-6-12 — Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity]]</div>|<div class="cpsessionviewpapertitle">Multimodal Inductive Transfer Learning for Detection of Alzheimer’s Dementia and its Severity</div><div class="cpsessionviewpaperauthor">[[Utkarsh Sarawgi|AUTHOR Utkarsh Sarawgi]], [[Wazeer Zulfikar|AUTHOR Wazeer Zulfikar]], [[Nouran Soliman|AUTHOR Nouran Soliman]], [[Pattie Maes|AUTHOR Pattie Maes]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3153.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-13|PAPER Wed-SS-1-6-13 — Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploiting Multi-Modal Features from Pre-Trained Networks for Alzheimer’s Dementia Recognition</div><div class="cpsessionviewpaperauthor">[[Junghyun Koo|AUTHOR Junghyun Koo]], [[Jie Hwan Lee|AUTHOR Jie Hwan Lee]], [[Jaewoo Pyo|AUTHOR Jaewoo Pyo]], [[Yujin Jo|AUTHOR Yujin Jo]], [[Kyogu Lee|AUTHOR Kyogu Lee]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3158.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-1-6-14|PAPER Wed-SS-1-6-14 — Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech]]</div>|<div class="cpsessionviewpapertitle">Automated Screening for Alzheimer’s Dementia Through Spontaneous Speech</div><div class="cpsessionviewpaperauthor">[[Muhammad Shehram Shah Syed|AUTHOR Muhammad Shehram Shah Syed]], [[Zafi Sherhan Syed|AUTHOR Zafi Sherhan Syed]], [[Margaret Lech|AUTHOR Margaret Lech]], [[Elena Pirogova|AUTHOR Elena Pirogova]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 3|<|
|^Chairs:&nbsp;|^John H.L. Hansen|
|^&nbsp;|^Xugang Lu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1252.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-3-1|PAPER Wed-SS-2-3-1 — Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments]]</div>|<div class="cpsessionviewpapertitle">Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments</div><div class="cpsessionviewpaperauthor">[[Jens Heitkaemper|AUTHOR Jens Heitkaemper]], [[Joerg Schmalenstroeer|AUTHOR Joerg Schmalenstroeer]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1666.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-3-2|PAPER Wed-SS-2-3-2 — Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2]]</div>|<div class="cpsessionviewpapertitle">Speaker Diarization System Based on DPCA Algorithm for Fearless Steps Challenge Phase-2</div><div class="cpsessionviewpaperauthor">[[Xueshuai Zhang|AUTHOR Xueshuai Zhang]], [[Wenchao Wang|AUTHOR Wenchao Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1915.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-3-3|PAPER Wed-SS-2-3-3 — The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02]]</div>|<div class="cpsessionviewpapertitle">The DKU Speech Activity Detection and Speaker Identification Systems for Fearless Steps Challenge Phase-02</div><div class="cpsessionviewpaperauthor">[[Qingjian Lin|AUTHOR Qingjian Lin]], [[Tingle Li|AUTHOR Tingle Li]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2822.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-3-4|PAPER Wed-SS-2-3-4 — “This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)]]</div>|<div class="cpsessionviewpapertitle">“This is Houston. Say again, please”. The Behavox System for the Apollo-11 Fearless Steps Challenge (Phase II)</div><div class="cpsessionviewpaperauthor">[[Arseniy Gorin|AUTHOR Arseniy Gorin]], [[Daniil Kulko|AUTHOR Daniil Kulko]], [[Steven Grima|AUTHOR Steven Grima]], [[Alex Glasman|AUTHOR Alex Glasman]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3054.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-3-5|PAPER Wed-SS-2-3-5 — FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data]]</div>|<div class="cpsessionviewpapertitle">FEARLESS STEPS Challenge (FS-2): Supervised Learning with Massive Naturalistic Apollo Data</div><div class="cpsessionviewpaperauthor">[[Aditya Joglekar|AUTHOR Aditya Joglekar]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Meena Chandra Shekar|AUTHOR Meena Chandra Shekar]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|20:30–21:30, Wednesday 28 October 2020, Room 7|<|
|^Chairs:&nbsp;|^Hiroki Tanaka|
|^&nbsp;|^Tanja Schultz|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1559.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-7-1|PAPER Wed-SS-2-7-1 — Combining Audio and Brain Activity for Predicting Speech Quality]]</div>|<div class="cpsessionviewpapertitle">Combining Audio and Brain Activity for Predicting Speech Quality</div><div class="cpsessionviewpaperauthor">[[Ivan Halim Parmonangan|AUTHOR Ivan Halim Parmonangan]], [[Hiroki Tanaka|AUTHOR Hiroki Tanaka]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2383.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-7-2|PAPER Wed-SS-2-7-2 — The “Sound of Silence” in EEG — Cognitive Voice Activity Detection]]</div>|<div class="cpsessionviewpapertitle">The “Sound of Silence” in EEG — Cognitive Voice Activity Detection</div><div class="cpsessionviewpaperauthor">[[Rini A. Sharon|AUTHOR Rini A. Sharon]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2496.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-7-3|PAPER Wed-SS-2-7-3 — Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals]]</div>|<div class="cpsessionviewpapertitle">Low Latency Auditory Attention Detection with Common Spatial Pattern Analysis of EEG Signals</div><div class="cpsessionviewpaperauthor">[[Siqi Cai|AUTHOR Siqi Cai]], [[Enze Su|AUTHOR Enze Su]], [[Yonghao Song|AUTHOR Yonghao Song]], [[Longhan Xie|AUTHOR Longhan Xie]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2946.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-7-4|PAPER Wed-SS-2-7-4 — Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach]]</div>|<div class="cpsessionviewpapertitle">Speech Spectrogram Estimation from Intracranial Brain Activity Using a Quantization Approach</div><div class="cpsessionviewpaperauthor">[[Miguel Angrick|AUTHOR Miguel Angrick]], [[Christian Herff|AUTHOR Christian Herff]], [[Garett Johnson|AUTHOR Garett Johnson]], [[Jerry Shih|AUTHOR Jerry Shih]], [[Dean Krusienski|AUTHOR Dean Krusienski]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3071.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-2-7-5|PAPER Wed-SS-2-7-5 — Neural Speech Decoding for Amyotrophic Lateral Sclerosis]]</div>|<div class="cpsessionviewpapertitle">Neural Speech Decoding for Amyotrophic Lateral Sclerosis</div><div class="cpsessionviewpaperauthor">[[Debadatta Dash|AUTHOR Debadatta Dash]], [[Paul Ferrari|AUTHOR Paul Ferrari]], [[Angel Hernandez|AUTHOR Angel Hernandez]], [[Daragh Heitzman|AUTHOR Daragh Heitzman]], [[Sara G. Austin|AUTHOR Sara G. Austin]], [[Jun Wang|AUTHOR Jun Wang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|21:45–22:45, Wednesday 28 October 2020, Room 11|<|
|^Chairs:&nbsp;|^Ming Li|
|^&nbsp;|^Wei Rao|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1249.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-3-11-1|PAPER Wed-SS-3-11-1 — The INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|<div class="cpsessionviewpapertitle">The INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div><div class="cpsessionviewpaperauthor">[[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Ming Li|AUTHOR Ming Li]], [[Hui Bu|AUTHOR Hui Bu]], [[Wei Rao|AUTHOR Wei Rao]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/1354.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-3-11-2|PAPER Wed-SS-3-11-2 — Deep Embedding Learning for Text-Dependent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Deep Embedding Learning for Text-Dependent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Peng Zhang|AUTHOR Peng Zhang]], [[Peng Hu|AUTHOR Peng Hu]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2580.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-3-11-3|PAPER Wed-SS-3-11-3 — STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020]]</div>|<div class="cpsessionviewpapertitle">STC-Innovation Speaker Recognition Systems for Far-Field Speaker Verification Challenge 2020</div><div class="cpsessionviewpaperauthor">[[Aleksei Gusev|AUTHOR Aleksei Gusev]], [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]], [[Alisa Vinogradova|AUTHOR Alisa Vinogradova]], [[Tseren Andzhukaev|AUTHOR Tseren Andzhukaev]], [[Andrey Shulipa|AUTHOR Andrey Shulipa]], [[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]], [[Alexander Kozlov|AUTHOR Alexander Kozlov]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/2688.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-3-11-4|PAPER Wed-SS-3-11-4 — NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge]]</div>|<div class="cpsessionviewpapertitle">NPU Speaker Verification System for INTERSPEECH 2020 Far-Field Speaker Verification Challenge</div><div class="cpsessionviewpaperauthor">[[Li Zhang|AUTHOR Li Zhang]], [[Jian Wu|AUTHOR Jian Wu]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="https://isca-speech.org/archive/Interspeech_2020/pdfs/3062.pdf" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-3-11-5|PAPER Wed-SS-3-11-5 — The JD AI Speaker Verification System for the FFSVC 2020 Challenge]]</div>|<div class="cpsessionviewpapertitle">The JD AI Speaker Verification System for the FFSVC 2020 Challenge</div><div class="cpsessionviewpaperauthor">[[Ying Tong|AUTHOR Ying Tong]], [[Wei Xue|AUTHOR Wei Xue]], [[Shanluo Huang|AUTHOR Shanluo Huang]], [[Lu Fan|AUTHOR Lu Fan]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Xiaodong He|AUTHOR Xiaodong He]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}}
</p></div>

<div class="cpsupportpage">
This HTML index file is based on the [ext[TiddlyWiki|http://www.tiddlywiki.com]] web application.
You can browse the table of contents, the author index, and the individual paper details, and open each paper's PDF file in a separate window.
</div>

|cpsupportpagetable|k
|cptightlineheight|k
|cptablecelltopbottomspace2|k
|PDF&nbsp;Reader |The PDF files in this publication are designed to be viewed with Adobe Reader 8 or later.|
|^Support |If you have problems with this publication, please contact Causal Productions at:<div class="cpmailingaddress">Causal Productions Pty Ltd<br>PO Box<$link to="$:/causal/Causal Productions Configurator Control Panel"> </$link>100<br>Rundle Mall<br>SA 5000<br>Australia</div>|
|Phone |+61 8 8295 8200|
|E-mail |[ext[info@causalproductions.com|mailto:info@causalproductions.com]]|
|Web |[ext[http://www.causalproductions.com|http://www.causalproductions.com]]|
\rules except wikilink
<div class="cppublicationname">INTERSPEECH 2020</div><div class="cppublicationdatevenue">October 25–29, 2020, Shanghai, China<span><a href="http://www.interspeech2020.org" target="_blank"><$button><small>Conference Website</small></$button></a></span></div>

|cpborderless|k
|cpwelcomepageconferencetable|k
|cph3|k
|<hr>|<|<|
| <div class="cpwelcomepagespaceaboveiconwithoutconferencename icon_size_on_welcome_page">{{$:/causal/image/INTERSPEECH 2020 WELCOME.SVG}}</div> |<div class="cpwelcomepageconferencelinks">[[Conference Information]]<br>[[Session List]]<br>[[Author Index]] </div> |
|<hr>|<|<|
|[[Copyright Statement]] |[[Support]] |
<div class="cpwelcomepagecopyright">
{{$:/causal/publication/Copyright Statement}}
</div>